repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
derricw/pyqtgraph
examples/optics/pyoptic.py
18
18439
# -*- coding: utf-8 -*- import pyqtgraph as pg from pyqtgraph.Qt import QtGui, QtCore import numpy as np import csv, gzip, os from pyqtgraph import Point class GlassDB: """ Database of dispersion coefficients for Schott glasses + Corning 7980 """ def __init__(self, fileName='schott_glasses.csv'): path = os.path.dirname(__file__) fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb') r = csv.reader(map(str, fh.readlines())) lines = [x for x in r] self.data = {} header = lines[0] for l in lines[1:]: info = {} for i in range(1, len(l)): info[header[i]] = l[i] self.data[l[0]] = info self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog. 'B1': 0.68374049400, 'B2': 0.42032361300, 'B3': 0.58502748000, 'C1': 0.00460352869, 'C2': 0.01339688560, 'C3': 64.49327320000, 'TAUI25/250': 0.95, ## transmission data is fabricated, but close. 'TAUI25/1400': 0.98, } for k in self.data: self.data[k]['ior_cache'] = {} def ior(self, glass, wl): """ Return the index of refraction for *glass* at wavelength *wl*. The *glass* argument must be a key in self.data. """ info = self.data[glass] cache = info['ior_cache'] if wl not in cache: B = list(map(float, [info['B1'], info['B2'], info['B3']])) C = list(map(float, [info['C1'], info['C2'], info['C3']])) w2 = (wl/1000.)**2 n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2]))) cache[wl] = n return cache[wl] def transmissionCurve(self, glass): data = self.data[glass] keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x] keys.sort() curve = np.empty((2,len(keys))) for i in range(len(keys)): curve[0][i] = keys[i] key = 'TAUI25/%d' % keys[i] val = data[key] if val == '': val = 0 else: val = float(val) curve[1][i] = val return curve GLASSDB = GlassDB() def wlPen(wl): """Return a pen representing the given wavelength""" l1 = 400 l2 = 700 hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8) val = 1.0 if wl > 700: val = 1.0 * (((700-wl)/700.) 
+ 1) elif wl < 400: val = wl * 1.0/400. #print hue, val color = pg.hsvColor(hue, 1.0, val) pen = pg.mkPen(color) return pen class ParamObj: # Just a helper for tracking parameters and responding to changes def __init__(self): self.__params = {} def __setitem__(self, item, val): self.setParam(item, val) def setParam(self, param, val): self.setParams(**{param:val}) def setParams(self, **params): """Set parameters for this optic. This is a good function to override for subclasses.""" self.__params.update(params) self.paramStateChanged() def paramStateChanged(self): pass def __getitem__(self, item): return self.getParam(item) def getParam(self, param): return self.__params[param] class Optic(pg.GraphicsObject, ParamObj): sigStateChanged = QtCore.Signal() def __init__(self, gitem, **params): ParamObj.__init__(self) pg.GraphicsObject.__init__(self) #, [0,0], [1,1]) self.gitem = gitem self.surfaces = gitem.surfaces gitem.setParentItem(self) self.roi = pg.ROI([0,0], [1,1]) self.roi.addRotateHandle([1, 1], [0.5, 0.5]) self.roi.setParentItem(self) defaults = { 'pos': Point(0,0), 'angle': 0, } defaults.update(params) self._ior_cache = {} self.roi.sigRegionChanged.connect(self.roiChanged) self.setParams(**defaults) def updateTransform(self): self.resetTransform() self.setPos(0, 0) self.translate(Point(self['pos'])) self.rotate(self['angle']) def setParam(self, param, val): ParamObj.setParam(self, param, val) def paramStateChanged(self): """Some parameters of the optic have changed.""" # Move graphics item self.gitem.setPos(Point(self['pos'])) self.gitem.resetTransform() self.gitem.rotate(self['angle']) # Move ROI to match try: self.roi.sigRegionChanged.disconnect(self.roiChanged) br = self.gitem.boundingRect() o = self.gitem.mapToParent(br.topLeft()) self.roi.setAngle(self['angle']) self.roi.setPos(o) self.roi.setSize([br.width(), br.height()]) finally: self.roi.sigRegionChanged.connect(self.roiChanged) self.sigStateChanged.emit() def roiChanged(self, *args): pos = 
self.roi.pos() # rotate gitem temporarily so we can decide where it will need to move self.gitem.resetTransform() self.gitem.rotate(self.roi.angle()) br = self.gitem.boundingRect() o1 = self.gitem.mapToParent(br.topLeft()) self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1)) def boundingRect(self): return QtCore.QRectF() def paint(self, p, *args): pass def ior(self, wavelength): return GLASSDB.ior(self['glass'], wavelength) class Lens(Optic): def __init__(self, **params): defaults = { 'dia': 25.4, ## diameter of lens 'r1': 50., ## positive means convex, use 0 for planar 'r2': 0, ## negative means convex 'd': 4.0, 'glass': 'N-BK7', 'reflect': False, } defaults.update(params) d = defaults.pop('d') defaults['x1'] = -d/2. defaults['x2'] = d/2. gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults) Optic.__init__(self, gitem, **defaults) def propagateRay(self, ray): """Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays""" """ NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs) For the incident vector I and surface normal N, and the ratio of indices of refraction eta, return the refraction vector. The result is computed by k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) if (k < 0.0) return genType(0.0) else return eta * I - (eta * dot(N, I) + sqrt(k)) * N The input parameters for the incident vector I and the surface normal N must already be normalized to get the desired results. eta == ratio of IORs For reflection: For the incident vector I and surface orientation N, returns the reflection direction: I – 2 ∗ dot(N, I) ∗ N N must already be normalized in order to achieve the desired result. 
""" iors = [self.ior(ray['wl']), 1.0] for i in [0,1]: surface = self.surfaces[i] ior = iors[i] p1, ai = surface.intersectRay(ray) #print "surface intersection:", p1, ai*180/3.14159 #trans = self.sceneTransform().inverted()[0] * surface.sceneTransform() #p1 = trans.map(p1) if p1 is None: ray.setEnd(None) break p1 = surface.mapToItem(ray, p1) #print "adjusted position:", p1 #ior = self.ior(ray['wl']) rd = ray['dir'] a1 = np.arctan2(rd[1], rd[0]) ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior)) #print [x for x in [a1, ai, (np.sin(ai) * ray['ior'] / ior), ar]] #print ai, np.sin(ai), ray['ior'], ior ray.setEnd(p1) dp = Point(np.cos(ar), np.sin(ar)) #p2 = p1+dp #p1p = self.mapToScene(p1) #p2p = self.mapToScene(p2) #dpp = Point(p2p-p1p) ray = Ray(parent=ray, ior=ior, dir=dp) return [ray] class Mirror(Optic): def __init__(self, **params): defaults = { 'r1': 0, 'r2': 0, 'd': 0.01, } defaults.update(params) d = defaults.pop('d') defaults['x1'] = -d/2. defaults['x2'] = d/2. gitem = CircularSolid(brush=(100,100,100,255), **defaults) Optic.__init__(self, gitem, **defaults) def propagateRay(self, ray): """Refract, reflect, absorb, and/or scatter ray. 
This function may create and return new rays""" surface = self.surfaces[0] p1, ai = surface.intersectRay(ray) if p1 is not None: p1 = surface.mapToItem(ray, p1) rd = ray['dir'] a1 = np.arctan2(rd[1], rd[0]) ar = a1 + np.pi - 2*ai ray.setEnd(p1) dp = Point(np.cos(ar), np.sin(ar)) ray = Ray(parent=ray, dir=dp) else: ray.setEnd(None) return [ray] class CircularSolid(pg.GraphicsObject, ParamObj): """GraphicsObject with two circular or flat surfaces.""" def __init__(self, pen=None, brush=None, **opts): """ Arguments for each surface are: x1,x2 - position of center of _physical surface_ r1,r2 - radius of curvature d1,d2 - diameter of optic """ defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4) defaults.update(opts) ParamObj.__init__(self) self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])] pg.GraphicsObject.__init__(self) for s in self.surfaces: s.setParentItem(self) if pen is None: self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True) else: self.pen = pg.mkPen(pen) if brush is None: self.brush = pg.mkBrush((230, 230, 255, 30)) else: self.brush = pg.mkBrush(brush) self.setParams(**defaults) def paramStateChanged(self): self.updateSurfaces() def updateSurfaces(self): self.surfaces[0].setParams(self['r1'], self['d1']) self.surfaces[1].setParams(-self['r2'], self['d2']) self.surfaces[0].setPos(self['x1'], 0) self.surfaces[1].setPos(self['x2'], 0) self.path = QtGui.QPainterPath() self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos())) self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed()) self.path.closeSubpath() def boundingRect(self): return self.path.boundingRect() def shape(self): return self.path def paint(self, p, *args): p.setRenderHints(p.renderHints() | p.Antialiasing) p.setPen(self.pen) p.fillPath(self.path, self.brush) p.drawPath(self.path) class CircleSurface(pg.GraphicsObject): def __init__(self, radius=None, 
diameter=None): """center of physical surface is at 0,0 radius is the radius of the surface. If radius is None, the surface is flat. diameter is of the optic's edge.""" pg.GraphicsObject.__init__(self) self.r = radius self.d = diameter self.mkPath() def setParams(self, r, d): self.r = r self.d = d self.mkPath() def mkPath(self): self.prepareGeometryChange() r = self.r d = self.d h2 = d/2. self.path = QtGui.QPainterPath() if r == 0: ## flat surface self.path.moveTo(0, h2) self.path.lineTo(0, -h2) else: ## half-height of surface can't be larger than radius h2 = min(h2, abs(r)) #dx = abs(r) - (abs(r)**2 - abs(h2)**2)**0.5 #p.moveTo(-d*w/2.+ d*dx, d*h2) arc = QtCore.QRectF(0, -r, r*2, r*2) #self.surfaces.append((arc.center(), r, h2)) a1 = np.arcsin(h2/r) * 180. / np.pi a2 = -2*a1 a1 += 180. self.path.arcMoveTo(arc, a1) self.path.arcTo(arc, a1, a2) #if d == -1: #p1 = QtGui.QPainterPath() #p1.addRect(arc) #self.paths.append(p1) self.h2 = h2 def boundingRect(self): return self.path.boundingRect() def paint(self, p, *args): return ## usually we let the optic draw. #p.setPen(pg.mkPen('r')) #p.drawPath(self.path) def intersectRay(self, ray): ## return the point of intersection and the angle of incidence #print "intersect ray" h = self.h2 r = self.r p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords. 
#print " ray: ", p, dir p = p - Point(r, 0) ## move position so center of circle is at 0,0 #print " adj: ", p, r if r == 0: #print " flat" if dir[0] == 0: y = 0 else: y = p[1] - p[0] * dir[1]/dir[0] if abs(y) > h: return None, None else: return (Point(0, y), np.arctan2(dir[1], dir[0])) else: #print " curve" ## find intersection of circle and line (quadratic formula) dx = dir[0] dy = dir[1] dr = (dx**2 + dy**2) ** 0.5 D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1] idr2 = 1.0 / dr**2 disc = r**2 * dr**2 - D**2 if disc < 0: return None, None disc2 = disc**0.5 if dy < 0: sgn = -1 else: sgn = 1 br = self.path.boundingRect() x1 = (D*dy + sgn*dx*disc2) * idr2 y1 = (-D*dx + abs(dy)*disc2) * idr2 if br.contains(x1+r, y1): pt = Point(x1, y1) else: x2 = (D*dy - sgn*dx*disc2) * idr2 y2 = (-D*dx - abs(dy)*disc2) * idr2 pt = Point(x2, y2) if not br.contains(x2+r, y2): return None, None raise Exception("No intersection!") norm = np.arctan2(pt[1], pt[0]) if r < 0: norm += np.pi #print " norm:", norm*180/3.1415 dp = p - pt #print " dp:", dp ang = np.arctan2(dp[1], dp[0]) #print " ang:", ang*180/3.1415 #print " ai:", (ang-norm)*180/3.1415 #print " intersection:", pt return pt + Point(r, 0), ang-norm class Ray(pg.GraphicsObject, ParamObj): """Represents a single straight segment of a ray""" sigStateChanged = QtCore.Signal() def __init__(self, **params): ParamObj.__init__(self) defaults = { 'ior': 1.0, 'wl': 500, 'end': None, 'dir': Point(1,0), } self.params = {} pg.GraphicsObject.__init__(self) self.children = [] parent = params.get('parent', None) if parent is not None: defaults['start'] = parent['end'] defaults['wl'] = parent['wl'] self['ior'] = parent['ior'] self['dir'] = parent['dir'] parent.addChild(self) defaults.update(params) defaults['dir'] = Point(defaults['dir']) self.setParams(**defaults) self.mkPath() def clearChildren(self): for c in self.children: c.clearChildren() c.setParentItem(None) self.scene().removeItem(c) self.children = [] def paramStateChanged(self): pass def 
addChild(self, ch): self.children.append(ch) ch.setParentItem(self) def currentState(self, relativeTo=None): pos = self['start'] dir = self['dir'] if relativeTo is None: return pos, dir else: trans = self.itemTransform(relativeTo)[0] p1 = trans.map(pos) p2 = trans.map(pos + dir) return Point(p1), Point(p2-p1) def setEnd(self, end): self['end'] = end self.mkPath() def boundingRect(self): return self.path.boundingRect() def paint(self, p, *args): #p.setPen(pg.mkPen((255,0,0, 150))) p.setRenderHints(p.renderHints() | p.Antialiasing) p.setCompositionMode(p.CompositionMode_Plus) p.setPen(wlPen(self['wl'])) p.drawPath(self.path) def mkPath(self): self.prepareGeometryChange() self.path = QtGui.QPainterPath() self.path.moveTo(self['start']) if self['end'] is not None: self.path.lineTo(self['end']) else: self.path.lineTo(self['start']+500*self['dir']) def trace(rays, optics): if len(optics) < 1 or len(rays) < 1: return for r in rays: r.clearChildren() o = optics[0] r2 = o.propagateRay(r) trace(r2, optics[1:]) class Tracer(QtCore.QObject): """ Simple ray tracer. Initialize with a list of rays and optics; calling trace() will cause rays to be extended by propagating them through each optic in sequence. """ def __init__(self, rays, optics): QtCore.QObject.__init__(self) self.optics = optics self.rays = rays for o in self.optics: o.sigStateChanged.connect(self.trace) self.trace() def trace(self): trace(self.rays, self.optics)
mit
ktan2020/legacy-automation
win/Lib/bsddb/test/test_fileid.py
7
1891
"""TestCase for reseting File ID. """ import os import shutil import unittest from test_all import db, test_support, get_new_environment_path, get_new_database_path class FileidResetTestCase(unittest.TestCase): def setUp(self): self.db_path_1 = get_new_database_path() self.db_path_2 = get_new_database_path() self.db_env_path = get_new_environment_path() def test_fileid_reset(self): # create DB 1 self.db1 = db.DB() self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=(db.DB_CREATE|db.DB_EXCL)) self.db1.put('spam', 'eggs') self.db1.close() shutil.copy(self.db_path_1, self.db_path_2) self.db2 = db.DB() self.db2.open(self.db_path_2, dbtype=db.DB_HASH) self.db2.put('spam', 'spam') self.db2.close() self.db_env = db.DBEnv() self.db_env.open(self.db_env_path, db.DB_CREATE|db.DB_INIT_MPOOL) # use fileid_reset() here self.db_env.fileid_reset(self.db_path_2) self.db1 = db.DB(self.db_env) self.db1.open(self.db_path_1, dbtype=db.DB_HASH, flags=db.DB_RDONLY) self.assertEqual(self.db1.get('spam'), 'eggs') self.db2 = db.DB(self.db_env) self.db2.open(self.db_path_2, dbtype=db.DB_HASH, flags=db.DB_RDONLY) self.assertEqual(self.db2.get('spam'), 'spam') self.db1.close() self.db2.close() self.db_env.close() def tearDown(self): test_support.unlink(self.db_path_1) test_support.unlink(self.db_path_2) test_support.rmtree(self.db_env_path) def test_suite(): suite = unittest.TestSuite() if db.version() >= (4, 4): suite.addTest(unittest.makeSuite(FileidResetTestCase)) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
mit
hrharkins/flask_pillow
flask_pillow.py
1
11312
import flask, json, sys try: from flask import _app_ctx_stack as stack except ImportError: from flask import _request_ctx_stack as stack class Pillow(object): factories = {} def __init__(self, app=None, *factories, **_kw): self.factory_filter = _kw.pop('factories', None) self.app = app self.cases = {} self.factories = dict(self.factories) self.auto_setup = _kw if app is not None: self.init_app(app) def init_app(self, app): self.app = app self.setup(app) if hasattr(app, 'teardown_appcontext'): app.teardown_appcontext(self.teardown) else: app.teardown_request(self.teardown) def setup(self, app): factories = app.config.setdefault('PILLOW_FACTORIES', {}) factory_filter = self.factory_filter for factory in self.factories: if not factory_filter or factory in factory_filter: if not factories.get(factory): factories[factory] = True self.factories[factory](self, app) configured = app.config.setdefault('PILLOW_CASES', {}) for pattern in self.cases: case = self.cases[pattern] if not configured.get(pattern): configured[pattern] = case return def teardown(self, exception): ctx = stack.top def factory(_self, _fn=None, *_default_mimetypes, **_kw): if _fn is None: return lambda f: _self.factory(f, **_kw) elif isinstance(_fn, basestring): return lambda f: _self.factory(f, _fn, *_default_mimetypes, **_kw) else: if not _default_mimetypes: raise TypeError('Must specify the mimetypes factory handles') _self.factories[_fn.__name__] = \ lambda self, app: _fn(self, app, _default_mimetypes, **_kw) @classmethod def default_factory(_cls, _fn=None, *_default_mimetypes, **_kw): if _fn is None: return lambda f: _cls.default_factory(f, **_kw) elif isinstance(_fn, basestring): return lambda f: _cls.default_factory \ (f, _fn, *_default_mimetypes, **_kw) else: if not _default_mimetypes: raise TypeError('Must specify the mimetypes factory handles') _cls.factories[_fn.__name__] = \ lambda self, app: _fn(self, app, _default_mimetypes, **_kw) def case(_self, _fn=None, *_mimetypes, **_kw): '''Define a 
rendering handler for mimetypes. This wraps up the handler fn using the keyword arguments for translation of the general restify() call to the specific handler. Each name in the kwargs represents a destination name that handler accepts and the associated vaule names something the keyword arguments to restify that the value is mapped from. The special value '?' denotes the "entity" positional argument provided to restify(). For example: REST = RESTify(app) REST.restifarian(json.dumps, 'application/json', 'text/json', obj='?') Maps the entity object to the obj (first) argument of json.dumps. If restify is called thusly: @app.route(...) def handler(): return restify({'who': 'world'}) The result would be the JSON representation if that is the best acceptable type from the Accept header or content-type in the GET/POST values. The special destination '__kw__' determines what's provided as generic keyword arguments rather than a specific value. Any source name can also be '*', which refers to the non-translated keyword arguments of the restify call: @REST.restifarian(__template='html_template', __source='html_source', __kw__='*'): def html_serialize(__template=None, __source=None, **_kw): if __template is not None: if __source is not None: raise ValueError('Cannot have both __source and __template') else: return render_template(__template, **_kw) elif __source is not None: return render_template_string(__source, **_kw) else: raise ValueError('Must specify either __source or __template') ''' if _fn is None: return lambda fn: \ _self.restifarian(fn, *_mimetypes, **_kw) elif isinstance(_fn, str): return lambda fn: \ _self.restifarian(fn, _fn, *_mimetypes, **_kw) else: _self.make_case(_fn, _mimetypes, **_kw) if _self.app is not None: _self.setup(_self.app) def make_case(self, fn, mimetypes, cls=object, **_kw): handler = make_translator(fn, '_entity', **_kw) for mimetype in mimetypes: for base in cls.__mro__: pattern = (mimetype, base.__name__) self.cases[pattern] = handler 
return fn @classmethod def pillow(_cls, entity, _name='rest', default=None, request=None, **_kw): '''Serialize the returned object based on Accept header. This method will use the config from the current_app to determine what to do. It uses the RESTIFARIAN config element and tries to find keys matching the following (in order): 1. The "content-type" GET or POST string. 1. The mimetypes in the "Accept" header, in order. The first mimetype with a handler wins. ''' app = flask.current_app if request is None: request = flask.request override = flask.request.values.get('content-type') override = override or flask.request.values.get('content_type') config = app.config.get('PILLOW_CASES') errors = {} if config is not None: if _name is not None: _kw[_name] = entity for pattern in _cls.permute(request, type(entity), override): handler = config.get(pattern) if handler is not None: try: result = handler(entity, **_kw) if result is not _cls.CONTINUE: return result except Exception, e: errors[pattern] = sys.exc_info() else: if default is not None: return default(entity, **_kw) if errors: for pattern, exc_info in errors.iteritems(): raise exc_info[1], None, exc_info[2] if override is None: mimetypes = [mimetype[0] for mimetype in request.accept_mimetypes] else: mimetypes = [override] raise TypeError('Could not render %r into %s' % (type(entity).__name__, ', '.join(repr(t) for t in mimetypes))) CONTINUE = 'CONTINUE' @classmethod def permute(cls, request, entity_cls, override=None): if override is None: mimetypes = [mimetype[0] for mimetype in request.accept_mimetypes] elif isinstance(override, (list, tuple, set)): mimetypes = override else: mimetypes = [override] for mimetype in mimetypes: for base in entity_cls.__mro__: yield mimetype, base.__name__ pillow = Pillow.pillow def make_translator(_fn, *_args, **_xlate): '''Creates a wrapper function on-the-fly to translate arguments. 
''' fn_args = list(_args) call_assign = [] call_kwarg = None for dest in _xlate: src = _xlate[dest] if dest == '__kw__': if src == '*': call_kwarg = '**__kw__' else: call_kwarg = '**' + src else: if src is True: src = dest elif src is False: continue if src.endswith('?'): arg = int(src[:-1] or '0') call_assign.append('translated[%r] = %s' % (dest, _args[arg])) elif src == '*': call_assign.append('translated[%r] = __kw__' % dest) else: fn_args.append(src + '=type(None)') call_assign.append('if %s is not type(None): ' 'translated[%r] = %s' % (src, dest, src)) fn_args.append('**__kw__') fn_src = ['def %s(%s):' % (_fn.__name__, ', '.join(fn_args))] if _fn.__doc__: fn_src.append(' %r' % _fn.__doc__) fn_src.append(' translated = {}') for assign in call_assign: fn_src.append(' ' + assign) if call_kwarg: fn_src.append(' translated.update(%s)' % call_kwarg) fn_src.append(' return _fn(**translated)') l = dict(_fn=_fn) exec '\n'.join(fn_src) in l return l[_fn.__name__] def templatize(_entity, _template=None, _source=None, **_kw): if _template is not None: return flask.render_template(_template, **_kw) elif _source is not None: return flask.render_template_string(_source, **_kw) else: raise ValueError('Either *_template or *_source must be set') @Pillow.default_factory('json', 'text/json', 'application/json') def to_json(pillow, app, mimetypes, mimetype_override=None, serialize_objects=None, **_kw): mimetypes = mimetype_override or app.config.get('json-types', mimetypes) serialize_objects = ( app.config.get('json-serialize-objects', True) if serialize_objects is None else serialize_objects) if serialize_objects: def json_dumps(*_args, **_kw): return json.dumps(*_args, default=lambda o: o.to_json(), **_kw) else: json_dumps = json.dumps pillow.case(json_dumps, *mimetypes, obj='?') @Pillow.default_factory('yaml', 'text/yaml', 'application/yaml') def to_yaml(pillow, app, mimetypes, mimetype_override=None, **_kw): mimetypes = mimetype_override or app.config.get('yaml-types', 
mimetypes) try: import yaml pillow.case(yaml.dump, *mimetypes, data='?') except ImportError: def fail(*_args, **_kw): raise ImportError('yaml was not available') pillow.case(fail) @Pillow.default_factory('html', 'text/html') def to_html(pillow, app, mimetypes, mimetype_override=None, **_kw): mimetypes = mimetype_override or app.config.get('html-types', mimetypes) pillow.case(templatize, *mimetypes, _template='html_template', _source='html_source', _entity='?', __kw__='*') @Pillow.default_factory('xml', 'text/xml') def to_xml(pillow, app, mimetypes, mimetype_override=None, **_kw): mimetypes = mimetype_override or app.config.get('xml-types', mimetypes) pillow.case(templatize, *mimetypes, _template='xml_template', _source='xml_source', _entity='?', __kw__='*')
mit
alfa-jor/addon
plugin.video.alfa/servers/debriders/alldebrid.py
1
1695
# -*- coding: utf-8 -*- from core import httptools from core import scrapertools from platformcode import logger # Returns an array of possible video url's from the page_url def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info() page_url = correct_url(page_url) dd1 = httptools.downloadpage("https://api.alldebrid.com/user/login?agent=mySoft&username=%s&password=%s" %(user, password)).data token = scrapertools.find_single_match(dd1, 'token":"([^"]+)') dd2 = httptools.downloadpage("https://api.alldebrid.com/link/unlock?agent=mySoft&token=%s&link=%s" %(token, page_url)).data link = scrapertools.find_single_match(dd2, 'link":"([^"]+)') link = link.replace("\\","") video_urls = [] if link: extension = "mp4 [alldebrid]" video_urls.append([extension, link]) else: try: server_error = "Alldebrid: " + data["error"].decode("utf-8", "ignore") server_error = server_error.replace("This link isn't available on the hoster website.", "Enlace no disponible en el servidor de descarga") \ .replace("Hoster unsupported or under maintenance.", "Servidor no soportado o en mantenimiento") except: server_error = "Alldebrid: Error en el usuario/password o en la web" video_urls.append([server_error, '']) return video_urls def correct_url(url): if "userporn.com" in url: url = url.replace("/e/", "/video/") if "putlocker" in url: url = url.replace("/embed/", "/file/") return url
gpl-3.0
minhtuancn/odoo
addons/sale_crm/__openerp__.py
260
2036
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Opportunity to Quotation', 'version': '1.0', 'category': 'Hidden', 'description': """ This module adds a shortcut on one or several opportunity cases in the CRM. =========================================================================== This shortcut allows you to generate a sales order based on the selected case. If different cases are open (a list), it generates one sale order by case. The case is then closed and linked to the generated sales order. We suggest you to install this module, if you installed both the sale and the crm modules. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/crm', 'depends': ['sale', 'crm', 'web_kanban_gauge'], 'data': [ 'wizard/crm_make_sale_view.xml', 'sale_crm_view.xml', 'security/sale_crm_security.xml', 'security/ir.model.access.csv', ], 'demo': [], 'test': ['test/sale_crm.yml'], 'installable': True, 'auto_install': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
francisliu/hbase
dev-support/git-jira-release-audit/git_jira_release_audit.py
1
30172
#!/usr/bin/env python3 # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Build a database from git commit histories. Can be used to audit git vs. jira. For usage, # see README.md. """An application to assist Release Managers with ensuring that histories in Git and fixVersions in JIRA are in agreement. See README.md for a detailed explanation. """ import argparse import csv import enum import logging import pathlib import re import sqlite3 import time import os import enlighten import git import jira LOG = logging.getLogger(os.path.basename(__file__)) class _DB: """Manages an instance of Sqlite on behalf of the application. Args: db_path (str): Path to the Sqlite database file. ':memory:' for an ephemeral database. **_kwargs: Convenience for CLI argument parsing. Ignored. Attributes: conn (:obj:`sqlite3.db2api.Connection`): The underlying connection object. 
""" SQL_LOG = LOG.getChild("sql") class Action(enum.Enum): """Describes an action to be taken against the database.""" ADD = 'ADD' REVERT = 'REVERT' SKIP = 'SKIP' def __init__(self, db_path, initialize_db, **_kwargs): self._conn = sqlite3.connect(db_path) self._conn.set_trace_callback(_DB.log_query) if initialize_db: for table in 'git_commits', 'jira_versions': self._conn.execute("DROP TABLE IF EXISTS %s" % table) self._conn.execute(""" CREATE TABLE IF NOT EXISTS "git_commits"( jira_id TEXT NOT NULL, branch TEXT NOT NULL, git_sha TEXT NOT NULL, git_tag TEXT, CONSTRAINT pk PRIMARY KEY (jira_id, branch, git_sha) );""") self._conn.execute(""" CREATE TABLE IF NOT EXISTS "jira_versions"( jira_id TEXT NOT NULL, fix_version TEXT NOT NULL, CONSTRAINT pk PRIMARY KEY (jira_id, fix_version) );""") self._conn.commit() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self._conn.close() @staticmethod def log_query(query): _DB.SQL_LOG.debug(re.sub(r'\s+', ' ', query).strip()) @property def conn(self): """:obj:`sqlite3.db2api.Connection`: Underlying database handle.""" return self._conn def apply_commit(self, action, jira_id, branch, git_sha): """Apply an edit to the commits database. Args: action (:obj:`_DB.Action`): The action to execute. jira_id (str): The applicable Issue ID from JIRA. branch (str): The name of the git branch from which the commit originates. git_sha (str): The commit's SHA. """ if action == _DB.Action.ADD: self.conn.execute( "INSERT INTO git_commits(jira_id, branch, git_sha) VALUES (upper(?),?,?)", (jira_id, branch, git_sha)) elif action == _DB.Action.REVERT: self.conn.execute(""" DELETE FROM git_commits WHERE jira_id=upper(?) AND branch=? """, (jira_id.upper(), branch)) def flush_commits(self): """Commit any pending changes to the database.""" self.conn.commit() def apply_git_tag(self, branch, git_sha, git_tag): """Annotate a commit in the commits database as being a part of the specified release. 
Args: branch (str): The name of the git branch from which the commit originates. git_sha (str): The commit's SHA. git_tag (str): The first release tag following the commit. """ self.conn.execute("UPDATE git_commits SET git_tag = ? WHERE branch = ? AND git_sha = ?", (git_tag, branch, git_sha)) def apply_fix_version(self, jira_id, fix_version): """Annotate a Jira issue in the jira database as being part of the specified release version. Args: jira_id (str): The applicable Issue ID from JIRA. fix_version (str): The annotated `fixVersion` as seen in JIRA. """ self.conn.execute("INSERT INTO jira_versions(jira_id, fix_version) VALUES (upper(?),?)", (jira_id, fix_version)) def unique_jira_ids_from_git(self): """Query the commits database for the population of Jira Issue IDs.""" results = self.conn.execute("SELECT distinct jira_id FROM git_commits").fetchall() return [x[0] for x in results] def backup(self, target): """Write a copy of the database to the `target` destination. Args: target (str): The backup target, a filesystem path. """ dst = sqlite3.connect(target) with dst: self._conn.backup(dst) dst.close() class _RepoReader: """This class interacts with the git repo, and encapsulates actions specific to HBase's git history. Args: db (:obj:`_DB`): A handle to the database manager. fallback_actions_path (str): Path to the file containing sha-specific actions (see README.md). remote_name (str): The name of the remote to query for branches and histories (i.e., "origin"). development_branch (str): The name of the branch on which active development occurs (i.e., "master"). release_line_regexp (str): Filter criteria used to select "release line" branches (such as "branch-1," "branch-2," &c.). **_kwargs: Convenience for CLI argument parsing. Ignored. 
""" _extract_release_tag_pattern = re.compile(r'^rel/(\d+\.\d+\.\d+)(\^0)?$', re.IGNORECASE) _skip_patterns = [ re.compile(r'^preparing development version.+', re.IGNORECASE), re.compile(r'^preparing hbase release.+', re.IGNORECASE), re.compile(r'^\s*updated? pom.xml version (for|to) .+', re.IGNORECASE), re.compile(r'^\s*updated? chang', re.IGNORECASE), re.compile(r'^\s*updated? (book|docs|documentation)', re.IGNORECASE), re.compile(r'^\s*updating (docs|changes).+', re.IGNORECASE), re.compile(r'^\s*bump (pom )?versions?', re.IGNORECASE), re.compile(r'^\s*updated? (version|poms|changes).+', re.IGNORECASE), ] _identify_leading_jira_id_pattern = re.compile(r'^[\s\[]*(hbase-\d+)', re.IGNORECASE) _identify_backport_jira_id_patterns = [ re.compile(r'^backport "(.+)".*', re.IGNORECASE), re.compile(r'^backport (.+)', re.IGNORECASE), ] _identify_revert_jira_id_pattern = re.compile(r'^revert:? "(.+)"', re.IGNORECASE) _identify_revert_revert_jira_id_pattern = re.compile( '^revert "revert "(.+)"\\.?"\\.?', re.IGNORECASE) _identify_amend_jira_id_pattern = re.compile(r'^amend (.+)', re.IGNORECASE) def __init__(self, db, fallback_actions_path, remote_name, development_branch, release_line_regexp, branch_filter_regexp, parse_release_tags, **_kwargs): self._db = db self._repo = _RepoReader._open_repo() self._fallback_actions = _RepoReader._load_fallback_actions(fallback_actions_path) self._remote_name = remote_name self._development_branch = development_branch self._release_line_regexp = release_line_regexp self._branch_filter_regexp = branch_filter_regexp self._parse_release_tags = parse_release_tags @property def repo(self): """:obj:`git.repo.base.Repo`: Underlying Repo handle.""" return self._repo @property def remote_name(self): """str: The name of the remote used for querying branches and histories.""" return self._remote_name @property def development_branch_ref(self): """:obj:`git.refs.reference.Reference`: The git branch where active development occurs.""" refs = 
self.repo.remote(self._remote_name).refs return [ref for ref in refs if ref.name == '%s/%s' % (self._remote_name, self._development_branch)][0] @property def release_line_refs(self): """:obj:`list` of :obj:`git.refs.reference.Reference`: The git branches identified as "release lines", i.e., "branch-2".""" refs = self.repo.remote(self._remote_name).refs pattern = re.compile('%s/%s' % (self._remote_name, self._release_line_regexp)) return [ref for ref in refs if pattern.match(ref.name)] @property def release_branch_refs(self): """:obj:`list` of :obj:`git.refs.reference.Reference`: The git branches identified as "release branches", i.e., "branch-2.2".""" refs = self.repo.remote(self._remote_name).refs release_line_refs = self.release_line_refs return [ref for ref in refs if any([ref.name.startswith(release_line.name + '.') for release_line in release_line_refs])] @staticmethod def _open_repo(): return git.Repo(pathlib.Path(__file__).parent.absolute(), search_parent_directories=True) def identify_least_common_commit(self, ref_a, ref_b): """Given a pair of references, attempt to identify the commit that they have in common, i.e., the commit at which a "release branch" originates from a "release line" branch. 
""" commits = self._repo.merge_base(ref_a, ref_b, "--all") if commits: return commits[0] raise Exception("could not identify merge base between %s, %s" % (ref_a, ref_b)) @staticmethod def _skip(summary): return any([p.match(summary) for p in _RepoReader._skip_patterns]) @staticmethod def _identify_leading_jira_id(summary): match = _RepoReader._identify_leading_jira_id_pattern.match(summary) if match: return match.groups()[0] return None @staticmethod def _identify_backport_jira_id(summary): for pattern in _RepoReader._identify_backport_jira_id_patterns: match = pattern.match(summary) if match: return _RepoReader._identify_leading_jira_id(match.groups()[0]) return None @staticmethod def _identify_revert_jira_id(summary): match = _RepoReader._identify_revert_jira_id_pattern.match(summary) if match: return _RepoReader._identify_leading_jira_id(match.groups()[0]) return None @staticmethod def _identify_revert_revert_jira_id(summary): match = _RepoReader._identify_revert_revert_jira_id_pattern.match(summary) if match: return _RepoReader._identify_leading_jira_id(match.groups()[0]) return None @staticmethod def _identify_amend_jira_id(summary): match = _RepoReader._identify_amend_jira_id_pattern.match(summary) if match: return _RepoReader._identify_leading_jira_id(match.groups()[0]) return None @staticmethod def _action_jira_id_for(summary): jira_id = _RepoReader._identify_leading_jira_id(summary) if jira_id: return _DB.Action.ADD, jira_id jira_id = _RepoReader._identify_backport_jira_id(summary) if jira_id: return _DB.Action.ADD, jira_id jira_id = _RepoReader._identify_revert_jira_id(summary) if jira_id: return _DB.Action.REVERT, jira_id jira_id = _RepoReader._identify_revert_revert_jira_id(summary) if jira_id: return _DB.Action.ADD, jira_id jira_id = _RepoReader._identify_amend_jira_id(summary) if jira_id: return _DB.Action.ADD, jira_id return None def _extract_release_tag(self, commit): """works for extracting the tag, but need a way to retro-actively tag commits 
we've already seen.""" names = self._repo.git.name_rev(commit, tags=True, refs='rel/*') for name in names.split(' '): match = _RepoReader._extract_release_tag_pattern.match(name) if match: return match.groups()[0] return None def _set_release_tag(self, branch, tag, shas): cnt = 0 for sha in shas: self._db.apply_git_tag(branch, sha, tag) cnt += 1 if cnt % 50 == 0: self._db.flush_commits() self._db.flush_commits() def _resolve_ambiguity(self, commit): if commit.hexsha not in self._fallback_actions: LOG.warning('Unable to resolve action for %s: %s', commit.hexsha, commit.summary) return _DB.Action.SKIP, None action, jira_id = self._fallback_actions[commit.hexsha] if not jira_id: jira_id = None return _DB.Action[action], jira_id def _row_generator(self, branch, commit): if _RepoReader._skip(commit.summary): return None result = _RepoReader._action_jira_id_for(commit.summary) if not result: result = self._resolve_ambiguity(commit) if not result: raise Exception('Cannot resolve action for %s: %s' % (commit.hexsha, commit.summary)) action, jira_id = result return action, jira_id, branch, commit.hexsha def populate_db_release_branch(self, origin_commit, release_branch): """List all commits on `release_branch` since `origin_commit`, recording them as observations in the commits database. Args: origin_commit (:obj:`git.objects.commit.Commit`): The sha of the first commit to consider. release_branch (str): The name of the ref whose history is to be parsed. 
""" global MANAGER branch_filter_pattern = re.compile('%s/%s' % (self._remote_name, self._branch_filter_regexp)) if not branch_filter_pattern.match(release_branch): return commits = list(self._repo.iter_commits( "%s...%s" % (origin_commit.hexsha, release_branch), reverse=True)) LOG.info("%s has %d commits since its origin at %s.", release_branch, len(commits), origin_commit) counter = MANAGER.counter(total=len(commits), desc=release_branch, unit='commit') commits_since_release = list() cnt = 0 for commit in counter(commits): row = self._row_generator(release_branch, commit) if row: self._db.apply_commit(*row) cnt += 1 if cnt % 50 == 0: self._db.flush_commits() commits_since_release.append(commit.hexsha) if self._parse_release_tags: tag = self._extract_release_tag(commit) if tag: self._set_release_tag(release_branch, tag, commits_since_release) commits_since_release = list() self._db.flush_commits() @staticmethod def _load_fallback_actions(file): result = dict() if pathlib.Path(file).exists(): with open(file, 'r') as handle: reader = csv.DictReader(filter(lambda line: line[0] != '#', handle)) result = dict() for row in reader: result[row['hexsha']] = (row['action'], row['jira_id']) return result class _JiraReader: """This class interacts with the Jira instance. Args: db (:obj:`_DB`): A handle to the database manager. jira_url (str): URL of the Jira instance to query. **_kwargs: Convenience for CLI argument parsing. Ignored. 
""" def __init__(self, db, jira_url, **_kwargs): self._db = db self.client = jira.JIRA(jira_url) self.throttle_time_in_sec = 1 def populate_db(self): """Query Jira for issue IDs found in the commits database, writing them to the jira database.""" global MANAGER jira_ids = self._db.unique_jira_ids_from_git() LOG.info("retrieving %s jira_ids from the issue tracker", len(jira_ids)) counter = MANAGER.counter(total=len(jira_ids), desc='fetch from Jira', unit='issue') chunk_size = 50 chunks = [jira_ids[i:i + chunk_size] for i in range(0, len(jira_ids), chunk_size)] cnt = 0 for chunk in chunks: query = "key in (" + ",".join([("'" + jira_id + "'") for jira_id in chunk]) + ")" results = self.client.search_issues(jql_str=query, maxResults=chunk_size, fields='fixVersions') for result in results: jira_id = result.key fix_versions = [version.name for version in result.fields.fixVersions] for fix_version in fix_versions: self._db.apply_fix_version(jira_id, fix_version) cnt += 1 if cnt % 50: self._db.flush_commits() counter.update(incr=len(chunk)) time.sleep(5) self._db.flush_commits() def fetch_issues(self, jira_ids): """Retrieve the specified jira Ids.""" global MANAGER LOG.info("retrieving %s jira_ids from the issue tracker", len(jira_ids)) counter = MANAGER.counter(total=len(jira_ids), desc='fetch from Jira', unit='issue') chunk_size = 50 chunks = [jira_ids[i:i + chunk_size] for i in range(0, len(jira_ids), chunk_size)] ret = list() for chunk in chunks: query = "key IN (" + ",".join([("'" + jira_id + "'") for jira_id in chunk]) + ")"\ + " ORDER BY issuetype ASC, priority DESC, key ASC" results = self.client.search_issues( jql_str=query, maxResults=chunk_size, fields='summary,issuetype,priority,resolution,components') for result in results: val = dict() val['key'] = result.key val['summary'] = result.fields.summary.strip() val['priority'] = result.fields.priority.name.strip() val['issue_type'] = result.fields.issuetype.name.strip() \ if result.fields.issuetype else None 
val['resolution'] = result.fields.resolution.name.strip() \ if result.fields.resolution else None val['components'] = [x.name.strip() for x in result.fields.components if x] \ if result.fields.components else [] ret.append(val) counter.update(incr=len(chunk)) return ret class Auditor: """This class builds databases from git and Jira, making it possible to audit the two for discrepancies. At some point, it will provide pre-canned audit queries against those databases. It is the entrypoint to this application. Args: repo_reader (:obj:`_RepoReader`): An instance of the `_RepoReader`. jira_reader (:obj:`_JiraReader`): An instance of the `JiraReader`. db (:obj:`_DB`): A handle to the database manager. **_kwargs: Convenience for CLI argument parsing. Ignored. """ def __init__(self, repo_reader, jira_reader, db, **_kwargs): self._repo_reader = repo_reader self._jira_reader = jira_reader self._db = db self._release_line_fix_versions = dict() for k, v in _kwargs.items(): if k.endswith('_fix_version'): release_line = k[:-len('_fix_version')] self._release_line_fix_versions[release_line] = v def populate_db_from_git(self): """Process the git repository, populating the commits database.""" for release_line in self._repo_reader.release_line_refs: branch_origin = self._repo_reader.identify_least_common_commit( self._repo_reader.development_branch_ref.name, release_line.name) self._repo_reader.populate_db_release_branch(branch_origin, release_line.name) for release_branch in self._repo_reader.release_branch_refs: if not release_branch.name.startswith(release_line.name): continue self._repo_reader.populate_db_release_branch(branch_origin, release_branch.name) def populate_db_from_jira(self): """Process the Jira issues identified by the commits database, populating the jira database.""" self._jira_reader.populate_db() @staticmethod def _write_report(filename, issues): with open(filename, 'w') as file: fieldnames = ['key', 'issue_type', 'priority', 'summary', 'resolution', 
'components'] writer = csv.DictWriter(file, fieldnames=fieldnames) writer.writeheader() for issue in issues: writer.writerow(issue) LOG.info('generated report at %s', filename) def report_new_for_release_line(self, release_line): """Builds a report of the Jira issues that are new on the target release line, not present on any of the associated release branches. (i.e., on branch-2 but not branch-{2.0,2.1,...})""" matches = [x for x in self._repo_reader.release_line_refs if x.name == release_line or x.remote_head == release_line] release_line_ref = next(iter(matches), None) if not release_line_ref: LOG.error('release line %s not found. available options are %s.', release_line, [x.name for x in self._repo_reader.release_line_refs]) return cursor = self._db.conn.execute(""" SELECT distinct jira_id FROM git_commits WHERE branch = ? EXCEPT SELECT distinct jira_id FROM git_commits WHERE branch LIKE ? """, (release_line_ref.name, '%s.%%' % release_line_ref.name)) jira_ids = [x[0] for x in cursor.fetchall()] issues = self._jira_reader.fetch_issues(jira_ids) filename = 'new_for_%s.csv' % release_line.replace('/', '-') Auditor._write_report(filename, issues) def report_new_for_release_branch(self, release_branch): """Builds a report of the Jira issues that are new on the target release branch, not present on any of the previous release branches. (i.e., on branch-2.3 but not branch-{2.0,2.1,...})""" matches = [x for x in self._repo_reader.release_branch_refs if x.name == release_branch or x.remote_head == release_branch] release_branch_ref = next(iter(matches), None) if not release_branch_ref: LOG.error('release branch %s not found. available options are %s.', release_branch, [x.name for x in self._repo_reader.release_branch_refs]) return previous_branches = [x.name for x in self._repo_reader.release_branch_refs if x.remote_head != release_branch_ref.remote_head] query = ( "SELECT distinct jira_id FROM git_commits" " WHERE branch = ?" 
" EXCEPT SELECT distinct jira_id FROM git_commits" f" WHERE branch IN ({','.join('?' for _ in previous_branches)})" ) cursor = self._db.conn.execute(query, tuple([release_branch_ref.name] + previous_branches)) jira_ids = [x[0] for x in cursor.fetchall()] issues = self._jira_reader.fetch_issues(jira_ids) filename = 'new_for_%s.csv' % release_branch.replace('/', '-') Auditor._write_report(filename, issues) @staticmethod def _str_to_bool(val): if not val: return False return val.lower() in ['true', 't', 'yes', 'y'] @staticmethod def _build_first_pass_parser(): parser = argparse.ArgumentParser(add_help=False) building_group = parser.add_argument_group(title='Building the audit database') building_group.add_argument( '--populate-from-git', help='When true, populate the audit database from the Git repository.', type=Auditor._str_to_bool, default=True) building_group.add_argument( '--populate-from-jira', help='When true, populate the audit database from Jira.', type=Auditor._str_to_bool, default=True) building_group.add_argument( '--db-path', help='Path to the database file, or leave unspecified for a transient db.', default='audit.db') building_group.add_argument( '--initialize-db', help='When true, initialize the database tables. 
This is destructive to the contents' + ' of an existing database.', type=Auditor._str_to_bool, default=False) report_group = parser.add_argument_group('Generating reports') report_group.add_argument( '--report-new-for-release-line', help=Auditor.report_new_for_release_line.__doc__, type=str, default=None) report_group.add_argument( '--report-new-for-release-branch', help=Auditor.report_new_for_release_branch.__doc__, type=str, default=None) git_repo_group = parser.add_argument_group('Interactions with the Git repo') git_repo_group.add_argument( '--git-repo-path', help='Path to the git repo, or leave unspecified to infer from the current' + ' file\'s path.', default=__file__) git_repo_group.add_argument( '--remote-name', help='The name of the git remote to use when identifying branches.' + ' Default: \'origin\'', default='origin') git_repo_group.add_argument( '--development-branch', help='The name of the branch from which all release lines originate.' + ' Default: \'master\'', default='master') git_repo_group.add_argument( '--development-branch-fix-version', help='The Jira fixVersion used to indicate an issue is committed to the development' + ' branch.', default='3.0.0') git_repo_group.add_argument( '--release-line-regexp', help='A regexp used to identify release lines.', default=r'branch-\d+$') git_repo_group.add_argument( '--parse-release-tags', help='When true, look for release tags and annotate commits according to their release' + ' version. 
An Expensive calculation, disabled by default.', type=Auditor._str_to_bool, default=False) git_repo_group.add_argument( '--fallback-actions-path', help='Path to a file containing _DB.Actions applicable to specific git shas.', default='fallback_actions.csv') git_repo_group.add_argument( '--branch-filter-regexp', help='Limit repo parsing to branch names that match this filter expression.', default=r'.*') jira_group = parser.add_argument_group('Interactions with Jira') jira_group.add_argument( '--jira-url', help='A URL locating the target JIRA instance.', default='https://issues.apache.org/jira') return parser, git_repo_group @staticmethod def _build_second_pass_parser(repo_reader, parent_parser, git_repo_group): for release_line in repo_reader.release_line_refs: name = release_line.name git_repo_group.add_argument( '--%s-fix-version' % name[len(repo_reader.remote_name) + 1:], help='The Jira fixVersion used to indicate an issue is committed to the specified ' + 'release line branch', required=True) return argparse.ArgumentParser( parents=[parent_parser], formatter_class=argparse.ArgumentDefaultsHelpFormatter ) MANAGER = None def main(): global MANAGER logging.basicConfig(level=logging.INFO) first_pass_parser, git_repo_group = Auditor._build_first_pass_parser() first_pass_args, extras = first_pass_parser.parse_known_args() first_pass_args_dict = vars(first_pass_args) with _DB(**first_pass_args_dict) as db: repo_reader = _RepoReader(db, **first_pass_args_dict) jira_reader = _JiraReader(db, **first_pass_args_dict) second_pass_parser = Auditor._build_second_pass_parser( repo_reader, first_pass_parser, git_repo_group) second_pass_args = second_pass_parser.parse_args(extras, first_pass_args) second_pass_args_dict = vars(second_pass_args) auditor = Auditor(repo_reader, jira_reader, db, **second_pass_args_dict) with enlighten.get_manager() as MANAGER: if second_pass_args.populate_from_git: auditor.populate_db_from_git() if second_pass_args.populate_from_jira: 
auditor.populate_db_from_jira() if second_pass_args.report_new_for_release_line: release_line = second_pass_args.report_new_for_release_line auditor.report_new_for_release_line(release_line) if second_pass_args.report_new_for_release_branch: release_branch = second_pass_args.report_new_for_release_branch auditor.report_new_for_release_branch(release_branch) if __name__ == '__main__': main()
apache-2.0
jrossyra/adaptivemd
docs/conf.py
2
7388
# -*- coding: utf-8 -*- # # AdaptiveMD documentation build configuration file, created by # sphinx-quickstart on Thu Mar 9 22:49:18 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../')) import pprint pprint.pprint(sys.path) try: from mock import Mock as MagicMock except ImportError: from unittest.mock import Mock as MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): return Mock() __name__ = "Mock" MOCK_MODULES = ['mdtraj', 'mdtraj.reporters', 'ujson', 'pymongo', 'pymongo.errors', 'gridfs', 'numpy', 'simtk', 'simtk.unit', 'simtk.openmm', 'simtk.openmm.app', 'pyemma.coordinates', ] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' import adaptivemd print("Generating doc for AdaptiveMD version {version} installed in {path}" .format(version=adaptivemd.__version__, path=adaptivemd.__path__)) # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinx.ext.githubpages', 'sphinx.ext.autosummary', 'nbsphinx', ] nbsphinx_execute = 'never' # issuetracker = 'github' # issuetracker_project = 'markovmodel/adaptivemd' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # add pandoc directives #pandoc_from = ['markdown', 'mediawiki'] #sys.path.insert(0, os.path.abspath('sphinxext')) #extensions.append('notebook_sphinxext') #extensions.append('pandoc_sphinxext') # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # markdown support: https://blog.readthedocs.com/adding-markdown-support/ from recommonmark.parser import CommonMarkParser source_suffix = ['.rst', '.md'] source_parsers = {'.md': CommonMarkParser,} # The master toctree document. master_doc = 'index' # General information about the project. project = u'AdaptiveMD' copyright = u'2017, Jan-Hendrik Prinz, Frank Noé' author = u'Jan-Hendrik Prinz, Frank Noé' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = adaptivemd.__version__ # The full version, including alpha/beta/rc tags. release = adaptivemd.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
# This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**/.ipynb_checkpoints', ] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # import sphinx_rtd_theme html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'AdaptiveMDdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ (master_doc, 'AdaptiveMD.tex', u'AdaptiveMD Documentation', u'Jan-Hendrik Prinz, Frank Noé', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'adaptivemd', u'AdaptiveMD Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'AdaptiveMD', u'AdaptiveMD Documentation', author, 'AdaptiveMD', 'One line description of project.', 'Miscellaneous'), ] # ----------------------------------------------------------------------------- # Autosummary # ----------------------------------------------------------------------------- autosummary_generate = True autodoc_default_flags = ['members'] autodoc_member_order = 'bysource' autoclass_content = 'both' numpydoc_class_members_toctree = True # spell checking spelling_lang = 'en_US' spelling_word_list_filename = 'spelling_wordlist.txt' spelling_show_suggestions = True # Napoleon settings napoleon_google_docstring = False napoleon_numpy_docstring = True napoleon_include_init_with_doc = True napoleon_include_private_with_doc = False napoleon_include_special_with_doc = False napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = True napoleon_use_admonition_for_references = False napoleon_use_ivar = True napoleon_use_param = True napoleon_use_rtype = True
lgpl-2.1
RalphBariz/RalphsDotNet
Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/xml/etree/ElementTree.py
42
57821
# # ElementTree # $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $ # # light-weight XML support for Python 2.3 and later. # # history (since 1.2.6): # 2005-11-12 fl added tostringlist/fromstringlist helpers # 2006-07-05 fl merged in selected changes from the 1.3 sandbox # 2006-07-05 fl removed support for 2.1 and earlier # 2007-06-21 fl added deprecation/future warnings # 2007-08-25 fl added doctype hook, added parser version attribute etc # 2007-08-26 fl added new serializer code (better namespace handling, etc) # 2007-08-27 fl warn for broken /tag searches on tree level # 2007-09-02 fl added html/text methods to serializer (experimental) # 2007-09-05 fl added method argument to tostring/tostringlist # 2007-09-06 fl improved error handling # 2007-09-13 fl added itertext, iterfind; assorted cleanups # 2007-12-15 fl added C14N hooks, copy method (experimental) # # Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. # # fredrik@pythonware.com # http://www.pythonware.com # # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2008 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. 
# # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/psf/license for licensing details. __all__ = [ # public symbols "Comment", "dump", "Element", "ElementTree", "fromstring", "fromstringlist", "iselement", "iterparse", "parse", "ParseError", "PI", "ProcessingInstruction", "QName", "SubElement", "tostring", "tostringlist", "TreeBuilder", "VERSION", "XML", "XMLParser", "XMLTreeBuilder", ] VERSION = "1.3.0" ## # The <b>Element</b> type is a flexible container object, designed to # store hierarchical data structures in memory. The type can be # described as a cross between a list and a dictionary. # <p> # Each element has a number of properties associated with it: # <ul> # <li>a <i>tag</i>. This is a string identifying what kind of data # this element represents (the element type, in other words).</li> # <li>a number of <i>attributes</i>, stored in a Python dictionary.</li> # <li>a <i>text</i> string.</li> # <li>an optional <i>tail</i> string.</li> # <li>a number of <i>child elements</i>, stored in a Python sequence</li> # </ul> # # To create an element instance, use the {@link #Element} constructor # or the {@link #SubElement} factory function. # <p> # The {@link #ElementTree} class can be used to wrap an element # structure, and convert it from and to XML. 
## import sys import re import warnings class _SimpleElementPath(object): # emulate pre-1.2 find/findtext/findall behaviour def find(self, element, tag, namespaces=None): for elem in element: if elem.tag == tag: return elem return None def findtext(self, element, tag, default=None, namespaces=None): elem = self.find(element, tag) if elem is None: return default return elem.text or "" def iterfind(self, element, tag, namespaces=None): if tag[:3] == ".//": for elem in element.iter(tag[3:]): yield elem for elem in element: if elem.tag == tag: yield elem def findall(self, element, tag, namespaces=None): return list(self.iterfind(element, tag, namespaces)) try: from . import ElementPath except ImportError: ElementPath = _SimpleElementPath() ## # Parser error. This is a subclass of <b>SyntaxError</b>. # <p> # In addition to the exception value, an exception instance contains a # specific exception code in the <b>code</b> attribute, and the line and # column of the error in the <b>position</b> attribute. class ParseError(SyntaxError): pass # -------------------------------------------------------------------- ## # Checks if an object appears to be a valid element object. # # @param An element instance. # @return A true value if this is an element object. # @defreturn flag def iselement(element): # FIXME: not sure about this; might be a better idea to look # for tag/attrib/text attributes return isinstance(element, Element) or hasattr(element, "tag") ## # Element class. This class defines the Element interface, and # provides a reference implementation of this interface. # <p> # The element name, attribute names, and attribute values can be # either ASCII strings (ordinary Python strings containing only 7-bit # ASCII characters) or Unicode strings. # # @param tag The element name. # @param attrib An optional dictionary, containing element attributes. # @param **extra Additional attributes, given as keyword arguments. 
# @see Element
# @see SubElement
# @see Comment
# @see ProcessingInstruction

class Element(object):
    """Reference implementation of the Element interface.

    An element has a tag, an attribute dictionary, optional text/tail
    strings, and an ordered list of subelements.  Serialized form:
    <tag attrib>text<child/>...</tag>tail
    """

    # Element tag (the element's name).
    tag = None

    # Element attribute dictionary.  Where possible, use get(), set(),
    # keys(), and items() to access element attributes.
    attrib = None

    # Text before the first subelement.  Either a string or None; note
    # that "no text" may be represented by either value, depending on
    # the parser.
    text = None

    # Text after this element's end tag, but before the next sibling
    # element's start tag.  Same string-or-None convention as `text`.
    tail = None # text after end tag, if any

    # constructor

    def __init__(self, tag, attrib={}, **extra):
        # The shared default dict is safe here: it is copied before use.
        attrib = attrib.copy()
        attrib.update(extra)
        self.tag = tag
        self.attrib = attrib
        self._children = []

    def __repr__(self):
        return "<Element %s at 0x%x>" % (repr(self.tag), id(self))

    def makeelement(self, tag, attrib):
        """Create a new element object of the same type as this element.

        @param tag Element tag.
        @param attrib Element attributes, given as a dictionary.
        @return A new element instance.
        """
        return self.__class__(tag, attrib)

    def copy(self):
        """(Experimental) Return a shallow copy of this element.

        Subelements are shared with the original tree.
        """
        elem = self.makeelement(self.tag, self.attrib)
        elem.text = self.text
        elem.tail = self.tail
        elem[:] = self
        return elem

    def __len__(self):
        # Counts subelements only; to check for any content at all, the
        # `text` attribute must be checked as well.
        return len(self._children)

    def __nonzero__(self):
        warnings.warn(
            "The behavior of this method will change in future versions. "
            "Use specific 'len(elem)' or 'elem is not None' test instead.",
            FutureWarning, stacklevel=2
            )
        return len(self._children) != 0 # emulate old behaviour, for now

    def __getitem__(self, index):
        """Return the subelement at *index*; raises IndexError if absent."""
        return self._children[index]

    def __setitem__(self, index, element):
        """Replace the subelement at *index*; raises IndexError if absent."""
        # if isinstance(index, slice):
        #     for elt in element:
        #         assert iselement(elt)
        # else:
        #     assert iselement(element)
        self._children[index] = element

    def __delitem__(self, index):
        """Delete the subelement at *index*; raises IndexError if absent."""
        del self._children[index]

    def append(self, element):
        """Add *element* to the end of this element's subelements.

        In document order, the new element appears after the last
        existing subelement (or directly after the text, if it's the
        first subelement), but before this element's end tag.
        """
        # assert iselement(element)
        self._children.append(element)

    def extend(self, elements):
        """Append zero or more subelements from a sequence (since 1.3)."""
        # for element in elements:
        #     assert iselement(element)
        self._children.extend(elements)

    def insert(self, index, element):
        """Insert a subelement at the given position."""
        # assert iselement(element)
        self._children.insert(index, element)

    def remove(self, element):
        """Remove the matching subelement.

        Unlike the find methods, this compares elements by identity,
        not by tag or contents.  To remove subelements by other means,
        build a filtered list and use slice assignment.

        Raises ValueError if no matching element is found.
        """
        # assert iselement(element)
        self._children.remove(element)

    def getchildren(self):
        """(Deprecated) Return all subelements, in document order."""
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'list(elem)' or iteration over elem instead.",
            DeprecationWarning, stacklevel=2
            )
        return self._children

    def find(self, path, namespaces=None):
        """Return the first matching subelement (by tag or path), or None."""
        return ElementPath.find(self, path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Return the text of the first matching subelement.

        Returns *default* if no element matches; a matching element
        with no text content yields an empty string.
        """
        return ElementPath.findtext(self, path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Return all matching subelements, in document order."""
        return ElementPath.findall(self, path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Return an iterator over all matching subelements."""
        return ElementPath.iterfind(self, path, namespaces)

    def clear(self):
        """Reset the element: drop subelements and attributes, and set
        text and tail to None."""
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    def get(self, key, default=None):
        """Return the attribute value for *key*, or *default* if absent.

        Equivalent to attrib.get, but some implementations may handle
        this more efficiently.
        """
        return self.attrib.get(key, default)

    def set(self, key, value):
        """Set attribute *key* to *value* (equivalent to attrib[key] = value)."""
        self.attrib[key] = value

    def keys(self):
        """Return attribute names, in arbitrary order."""
        return self.attrib.keys()

    def items(self):
        """Return (name, value) pairs for all attributes, arbitrary order."""
        return self.attrib.items()

    def iter(self, tag=None):
        """Loop over this element and all descendants, in document order,
        yielding all elements with a matching tag (all elements if *tag*
        is None or "*").

        If the tree is modified during iteration, new or removed elements
        may or may not be included; use list(elem.iter()) for a stable
        snapshot.
        """
        if tag == "*":
            tag = None
        if tag is None or self.tag == tag:
            yield self
        for e in self._children:
            for e in e.iter(tag):
                yield e

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'elem.iter()' or 'list(elem.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))

    def itertext(self):
        """Loop over this element and all subelements, in document order,
        yielding all inner text."""
        tag = self.tag
        # Comments/PIs use a function object as their tag; skip them.
        if not isinstance(tag, basestring) and tag is not None:
            return
        if self.text:
            yield self.text
        for e in self:
            for s in e.itertext():
                yield s
            if e.tail:
                yield e.tail

# compatibility
_Element = _ElementInterface = Element

##
# Subelement factory.  This function creates an element instance, and
# appends it to an existing element.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element

def SubElement(parent, tag, attrib={}, **extra):
    """Create an element, append it to *parent*, and return it."""
    # Merge positional and keyword attributes without mutating the
    # caller's dictionary.
    merged = attrib.copy()
    merged.update(extra)
    child = parent.makeelement(tag, merged)
    parent.append(child)
    return child

##
# Comment element factory.  This factory function creates a special
# element that will be serialized as an XML comment by the standard
# serializer.
# <p>
# The comment string can be either an 8-bit ASCII string or a Unicode
# string.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element

def Comment(text=None):
    """Create a comment pseudo-element (its tag is this function itself)."""
    elem = Element(Comment)
    elem.text = text
    return elem

##
# PI element factory.  This factory function creates a special element
# that will be serialized as an XML processing instruction by the standard
# serializer.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element

def ProcessingInstruction(target, text=None):
    """Create a processing-instruction pseudo-element."""
    elem = Element(ProcessingInstruction)
    # The target and optional contents are stored together in .text.
    elem.text = target if not text else target + " " + text
    return elem

PI = ProcessingInstruction

##
# QName wrapper.  This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
#     or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag.  If given, the first argument is interpreted as
#     an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.
class QName(object):
    """Wrapper for a QName value in "{uri}local" form.

    If *tag* is given, *text_or_uri* is interpreted as the URI part and
    *tag* as the local name.  Wrapping tag/attribute values in QName
    gives proper namespace handling on output.
    """
    def __init__(self, text_or_uri, tag=None):
        if tag:
            # Build the universal-name form from (uri, local).
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri
    def __str__(self):
        return self.text
    def __hash__(self):
        return hash(self.text)
    def __cmp__(self, other):
        # Python 2 ordering: compare by string value, against QName or str.
        if isinstance(other, QName):
            return cmp(self.text, other.text)
        return cmp(self.text, other)

# --------------------------------------------------------------------

##
# ElementTree wrapper class.  This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name.  If given, the
#     tree is initialized with the contents of this XML file.

class ElementTree(object):

    def __init__(self, element=None, file=None):
        # assert element is None or iselement(element)
        self._root = element # first node
        if file:
            self.parse(file)

    def getroot(self):
        """Return the root element for this tree (an Element, or None)."""
        return self._root

    def _setroot(self, element):
        """Replace the root element, discarding the current contents.

        Use with care.
        """
        # assert iselement(element)
        self._root = element

    def parse(self, source, parser=None):
        """Load an external XML document into this tree.

        *source* is a file name or a file object (anything with a
        read(n) method).  *parser* defaults to the standard XMLParser.
        Returns the document root element; raises ParseError on bad XML.
        """
        if not hasattr(source, "read"):
            source = open(source, "rb")
        if not parser:
            parser = XMLParser(target=TreeBuilder())
        while 1:
            # Feed the parser in fixed-size chunks to bound memory use.
            data = source.read(65536)
            if not data:
                break
            parser.feed(data)
        self._root = parser.close()
        return self._root

    def iter(self, tag=None):
        """Iterate over all elements in this tree, in document order,
        optionally restricted to a matching *tag*."""
        # assert self._root is not None
        return self._root.iter(tag)

    # compatibility
    def getiterator(self, tag=None):
        # Change for a DeprecationWarning in 1.4
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'tree.iter()' or 'list(tree.iter())' instead.",
            PendingDeprecationWarning, stacklevel=2
            )
        return list(self.iter(tag))

    def find(self, path, namespaces=None):
        """Return the first toplevel element matching *path*, or None.

        Same as getroot().find(path).
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)

    def findtext(self, path, default=None, namespaces=None):
        """Return the text of the first toplevel element matching *path*.

        Returns *default* if nothing matches; a matching element with
        no text content yields an empty string.  Same as
        getroot().findtext(path).
        """
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)

    def findall(self, path, namespaces=None):
        """Return all toplevel elements matching *path*, in document
        order.  Same as getroot().findall(path)."""
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)

    def iterfind(self, path, namespaces=None):
        """Return an iterator over all matching subelements, in document
        order.  Same as getroot().iterfind(path)."""
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version.  If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)

    def write(self, file_or_filename,
              # keyword arguments
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None):
        """Write the element tree to a file, as XML.

        @param file_or_filename A file name, or a file object opened for
            writing.
        @keyparam encoding Optional output encoding (default is US-ASCII;
            "utf-8" for the "c14n" method).
        @keyparam method Output method: "xml" (default), "html", "text"
            or "c14n".
        @keyparam xml_declaration Controls whether an XML declaration is
            emitted: False never, True always, None (default) only when
            the encoding is not US-ASCII or UTF-8.
        """
        # assert self._root is not None
        if not method:
            method = "xml"
        elif method not in _serialize:
            # FIXME: raise an ImportError for c14n if ElementC14N is missing?
            raise ValueError("unknown method %r" % method)
        if hasattr(file_or_filename, "write"):
            file = file_or_filename
        else:
            file = open(file_or_filename, "wb")
        write = file.write
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        elif xml_declaration or (xml_declaration is None and
                                 encoding not in ("utf-8", "us-ascii")):
            if method == "xml":
                write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        if method == "text":
            _serialize_text(write, self._root, encoding)
        else:
            qnames, namespaces = _namespaces(
                self._root, encoding, default_namespace
                )
            serialize = _serialize[method]
            serialize(write, self._root, encoding, qnames, namespaces)
        # Only close files we opened ourselves.
        if file_or_filename is not file:
            file.close()

    def write_c14n(self, file):
        # lxml.etree compatibility.  use output method instead
        return self.write(file, method="c14n")

# --------------------------------------------------------------------
# serialization support

def _namespaces(elem, encoding, default_namespace=None):
    """Collect the namespace/qname tables used to serialize *elem*.

    Returns (qnames, namespaces): qnames maps raw tag/attribute names to
    their *encoded* "prefix:local" serialized form; namespaces maps
    namespace URIs to prefixes.
    """
    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""

    def encode(text):
        return text.encode(encoding)

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        # Invent a sequential "nsN" prefix for unknown URIs.
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, basestring):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces

def _serialize_xml(write, elem, encoding, qnames, namespaces):
    """Serialize *elem* (and its subtree) as XML via the *write* callable."""
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _encode(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _encode(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # Tag suppressed (qnames maps it to None): emit content only.
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    # xmlns declarations are emitted on the root call only
                    # (recursive calls pass namespaces=None).
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))

# HTML elements that must be serialized without a closing tag.
# NOTE: fixed a missing comma between "meta" and "param" -- the implicit
# string concatenation produced a single bogus "metaparam" entry, so
# <meta> and <param> were wrongly given closing tags by _serialize_html.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")

try:
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    pass

def _serialize_html(write, elem, encoding, qnames, namespaces):
    """Serialize *elem* (and its subtree) as HTML via *write*."""
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                if namespaces:
                    for v, k in sorted(namespaces.items(),
                                       key=lambda x: x[1]):  # sort on prefix
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
                for k, v in sorted(items):  # lexical order
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
            write(">")
            tag = tag.lower()
            if text:
                if tag == "script" or tag == "style":
                    # script/style contents are emitted raw (no escaping).
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))

def _serialize_text(write, elem, encoding):
    """Serialize only the inner text of *elem* (and its tail) via *write*."""
    for part in elem.itertext():
        write(part.encode(encoding))
    if elem.tail:
        write(elem.tail.encode(encoding))

# Dispatch table used by ElementTree.write(); keyed by output method.
_serialize = {
    "xml": _serialize_xml,
    "html": _serialize_html,
    "text": _serialize_text,
# this optional method is imported at the end of the module
#   "c14n": _serialize_c14n,
}

##
# Registers a namespace prefix.  The registry is global, and any
# existing mapping for either the given prefix or the namespace URI
# will be removed.
#
# @param prefix Namespace prefix.
# @param uri Namespace uri.  Tags and attributes in this namespace
#     will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
#     invalid.
def register_namespace(prefix, uri): if re.match("ns\d+$", prefix): raise ValueError("Prefix format reserved for internal use") for k, v in _namespace_map.items(): if k == uri or v == prefix: del _namespace_map[k] _namespace_map[uri] = prefix _namespace_map = { # "well-known" namespace prefixes "http://www.w3.org/XML/1998/namespace": "xml", "http://www.w3.org/1999/xhtml": "html", "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", "http://schemas.xmlsoap.org/wsdl/": "wsdl", # xml schema "http://www.w3.org/2001/XMLSchema": "xs", "http://www.w3.org/2001/XMLSchema-instance": "xsi", # dublin core "http://purl.org/dc/elements/1.1/": "dc", } def _raise_serialization_error(text): raise TypeError( "cannot serialize %r (type %s)" % (text, type(text).__name__) ) def _encode(text, encoding): try: return text.encode(encoding, "xmlcharrefreplace") except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_cdata(text, encoding): # escape character data try: # it's worth avoiding do-nothing calls for strings that are # shorter than 500 character, or so. assume that's, by far, # the most common case in most applications. 
if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") return text.encode(encoding, "xmlcharrefreplace") except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_attrib(text, encoding): # escape attribute value try: if "&" in text: text = text.replace("&", "&amp;") if "<" in text: text = text.replace("<", "&lt;") if ">" in text: text = text.replace(">", "&gt;") if "\"" in text: text = text.replace("\"", "&quot;") if "\n" in text: text = text.replace("\n", "&#10;") return text.encode(encoding, "xmlcharrefreplace") except (TypeError, AttributeError): _raise_serialization_error(text) def _escape_attrib_html(text, encoding): # escape attribute value try: if "&" in text: text = text.replace("&", "&amp;") if ">" in text: text = text.replace(">", "&gt;") if "\"" in text: text = text.replace("\"", "&quot;") return text.encode(encoding, "xmlcharrefreplace") except (TypeError, AttributeError): _raise_serialization_error(text) # -------------------------------------------------------------------- ## # Generates a string representation of an XML element, including all # subelements. # # @param element An Element instance. # @keyparam encoding Optional output encoding (default is US-ASCII). # @keyparam method Optional output method ("xml", "html", "text" or # "c14n"; default is "xml"). # @return An encoded string containing the XML data. # @defreturn string def tostring(element, encoding=None, method=None): class dummy: pass data = [] file = dummy() file.write = data.append ElementTree(element).write(file, encoding, method=method) return "".join(data) ## # Generates a string representation of an XML element, including all # subelements. The string is returned as a sequence of string fragments. # # @param element An Element instance. # @keyparam encoding Optional output encoding (default is US-ASCII). 
# @keyparam method Optional output method ("xml", "html", "text" or # "c14n"; default is "xml"). # @return A sequence object containing the XML data. # @defreturn sequence # @since 1.3 def tostringlist(element, encoding=None, method=None): class dummy: pass data = [] file = dummy() file.write = data.append ElementTree(element).write(file, encoding, method=method) # FIXME: merge small fragments into larger parts return data ## # Writes an element tree or element structure to sys.stdout. This # function should be used for debugging only. # <p> # The exact output format is implementation dependent. In this # version, it's written as an ordinary XML file. # # @param elem An element tree or an individual element. def dump(elem): # debugging if not isinstance(elem, ElementTree): elem = ElementTree(elem) elem.write(sys.stdout) tail = elem.getroot().tail if not tail or tail[-1] != "\n": sys.stdout.write("\n") # -------------------------------------------------------------------- # parsing ## # Parses an XML document into an element tree. # # @param source A filename or file object containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An ElementTree instance def parse(source, parser=None): tree = ElementTree() tree.parse(source, parser) return tree ## # Parses an XML document into an element tree incrementally, and reports # what's going on to the user. # # @param source A filename or file object containing XML data. # @param events A list of events to report back. If omitted, only "end" # events are reported. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return A (event, elem) iterator. def iterparse(source, events=None, parser=None): if sys.platform == 'cli': raise NotImplementedError('iterparse is not supported on IronPython. 
(CP #31923)') if not hasattr(source, "read"): source = open(source, "rb") if not parser: parser = XMLParser(target=TreeBuilder()) return _IterParseIterator(source, events, parser) class _IterParseIterator(object): def __init__(self, source, events, parser): self._file = source self._events = [] self._index = 0 self.root = self._root = None self._parser = parser # wire up the parser for event reporting parser = self._parser._parser append = self._events.append if events is None: events = ["end"] for event in events: if event == "start": try: parser.ordered_attributes = 1 parser.specified_attributes = 1 def handler(tag, attrib_in, event=event, append=append, start=self._parser._start_list): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler except AttributeError: def handler(tag, attrib_in, event=event, append=append, start=self._parser._start): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler elif event == "end": def handler(tag, event=event, append=append, end=self._parser._end): append((event, end(tag))) parser.EndElementHandler = handler elif event == "start-ns": def handler(prefix, uri, event=event, append=append): try: uri = (uri or "").encode("ascii") except UnicodeError: pass append((event, (prefix or "", uri or ""))) parser.StartNamespaceDeclHandler = handler elif event == "end-ns": def handler(prefix, event=event, append=append): append((event, None)) parser.EndNamespaceDeclHandler = handler else: raise ValueError("unknown event %r" % event) def next(self): while 1: try: item = self._events[self._index] except IndexError: if self._parser is None: self.root = self._root raise StopIteration # load event buffer del self._events[:] self._index = 0 data = self._file.read(16384) if data: self._parser.feed(data) else: self._root = self._parser.close() self._parser = None else: self._index = self._index + 1 return item def __iter__(self): return self ## # Parses an XML document from a string constant. 
This function can # be used to embed "XML literals" in Python code. # # @param source A string containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An Element instance. # @defreturn Element def XML(text, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) return parser.close() ## # Parses an XML document from a string constant, and also returns # a dictionary which maps from element id:s to elements. # # @param source A string containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return A tuple containing an Element instance and a dictionary. # @defreturn (Element, dictionary) def XMLID(text, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) tree = parser.close() ids = {} for elem in tree.iter(): id = elem.get("id") if id: ids[id] = elem return tree, ids ## # Parses an XML document from a string constant. Same as {@link #XML}. # # @def fromstring(text) # @param source A string containing XML data. # @return An Element instance. # @defreturn Element fromstring = XML ## # Parses an XML document from a sequence of string fragments. # # @param sequence A list or other sequence containing XML data fragments. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An Element instance. # @defreturn Element # @since 1.3 def fromstringlist(sequence, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) for text in sequence: parser.feed(text) return parser.close() # -------------------------------------------------------------------- ## # Generic element structure builder. This builder converts a sequence # of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link # #TreeBuilder.end} method calls to a well-formed element structure. 
# <p> # You can use this class to build an element structure using a custom XML # parser, or a parser for some other XML-like format. # # @param element_factory Optional element factory. This factory # is called to create new Element instances, as necessary. class TreeBuilder(object): def __init__(self, element_factory=None): self._data = [] # data collector self._elem = [] # element stack self._last = None # last element self._tail = None # true if we're after an end tag if element_factory is None: element_factory = Element self._factory = element_factory ## # Flushes the builder buffers, and returns the toplevel document # element. # # @return An Element instance. # @defreturn Element def close(self): assert len(self._elem) == 0, "missing end tags" assert self._last is not None, "missing toplevel element" return self._last def _flush(self): if self._data: if self._last is not None: text = "".join(self._data) if self._tail: assert self._last.tail is None, "internal error (tail)" self._last.tail = text else: assert self._last.text is None, "internal error (text)" self._last.text = text self._data = [] ## # Adds text to the current element. # # @param data A string. This should be either an 8-bit string # containing ASCII text, or a Unicode string. def data(self, data): self._data.append(data) ## # Opens a new element. # # @param tag The element name. # @param attrib A dictionary containing element attributes. # @return The opened element. # @defreturn Element def start(self, tag, attrs): self._flush() self._last = elem = self._factory(tag, attrs) if self._elem: self._elem[-1].append(elem) self._elem.append(elem) self._tail = 0 return elem ## # Closes the current element. # # @param tag The element name. # @return The closed element. 
# @defreturn Element def end(self, tag): self._flush() self._last = self._elem.pop() assert self._last.tag == tag,\ "end tag mismatch (expected %s, got %s)" % ( self._last.tag, tag) self._tail = 1 return self._last ## # Element structure builder for XML source data, based on the # <b>expat</b> parser. # # @keyparam target Target object. If omitted, the builder uses an # instance of the standard {@link #TreeBuilder} class. # @keyparam html Predefine HTML entities. This flag is not supported # by the current implementation. # @keyparam encoding Optional encoding. If given, the value overrides # the encoding specified in the XML file. # @see #ElementTree # @see #TreeBuilder class XMLParser(object): def __init__(self, html=0, target=None, encoding=None): try: from xml.parsers import expat except ImportError: try: import pyexpat as expat except ImportError: raise ImportError( "No module named expat; use SimpleXMLTreeBuilder instead" ) parser = expat.ParserCreate(encoding, "}") if target is None: target = TreeBuilder() # underscored names are provided for compatibility only self.parser = self._parser = parser self.target = self._target = target self._error = expat.error self._names = {} # name memo cache # callbacks parser.DefaultHandlerExpand = self._default parser.StartElementHandler = self._start parser.EndElementHandler = self._end parser.CharacterDataHandler = self._data # optional callbacks parser.CommentHandler = self._comment parser.ProcessingInstructionHandler = self._pi # let expat do the buffering, if supported try: self._parser.buffer_text = 1 except AttributeError: pass # use new-style attribute handling, if supported try: self._parser.ordered_attributes = 1 self._parser.specified_attributes = 1 parser.StartElementHandler = self._start_list except AttributeError: pass self._doctype = None self.entity = {} try: self.version = "Expat %d.%d.%d" % expat.version_info except AttributeError: pass # unknown def _raiseerror(self, value): err = ParseError(value) 
err.code = value.code err.position = value.lineno, value.offset raise err def _fixtext(self, text): # convert text string to ascii, if possible try: return text.encode("ascii") except UnicodeError: return text def _fixname(self, key): # expand qname, and convert name string to ascii, if possible try: name = self._names[key] except KeyError: name = key if "}" in name: name = "{" + name self._names[key] = name = self._fixtext(name) return name def _start(self, tag, attrib_in): fixname = self._fixname fixtext = self._fixtext tag = fixname(tag) attrib = {} for key, value in attrib_in.items(): attrib[fixname(key)] = fixtext(value) return self.target.start(tag, attrib) def _start_list(self, tag, attrib_in): fixname = self._fixname fixtext = self._fixtext tag = fixname(tag) attrib = {} if attrib_in: for i in range(0, len(attrib_in), 2): attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1]) return self.target.start(tag, attrib) def _data(self, text): return self.target.data(self._fixtext(text)) def _end(self, tag): return self.target.end(self._fixname(tag)) def _comment(self, data): try: comment = self.target.comment except AttributeError: pass else: return comment(self._fixtext(data)) def _pi(self, target, data): try: pi = self.target.pi except AttributeError: pass else: return pi(self._fixtext(target), self._fixtext(data)) def _default(self, text): prefix = text[:1] if prefix == "&": # deal with undefined entities try: self.target.data(self.entity[text[1:-1]]) except KeyError: from xml.parsers import expat err = expat.error( "undefined entity %s: line %d, column %d" % (text, self._parser.ErrorLineNumber, self._parser.ErrorColumnNumber) ) err.code = 11 # XML_ERROR_UNDEFINED_ENTITY err.lineno = self._parser.ErrorLineNumber err.offset = self._parser.ErrorColumnNumber raise err elif prefix == "<" and text[:9] == "<!DOCTYPE": self._doctype = [] # inside a doctype declaration elif self._doctype is not None: # parse doctype contents if prefix == ">": self._doctype = None 
return text = text.strip() if not text: return self._doctype.append(text) n = len(self._doctype) if n > 2: type = self._doctype[1] if type == "PUBLIC" and n == 4: name, type, pubid, system = self._doctype elif type == "SYSTEM" and n == 3: name, type, system = self._doctype pubid = None else: return if pubid: pubid = pubid[1:-1] if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) elif self.doctype is not self._XMLParser__doctype: # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) self.doctype(name, pubid, system[1:-1]) self._doctype = None ## # (Deprecated) Handles a doctype declaration. # # @param name Doctype name. # @param pubid Public identifier. # @param system System identifier. def doctype(self, name, pubid, system): """This method of XMLParser is deprecated.""" warnings.warn( "This method of XMLParser is deprecated. Define doctype() " "method on the TreeBuilder target.", DeprecationWarning, ) # sentinel, if doctype is redefined in a subclass __doctype = doctype ## # Feeds data to the parser. # # @param data Encoded data. def feed(self, data): try: self._parser.Parse(data, 0) except self._error, v: self._raiseerror(v) ## # Finishes feeding data to the parser. # # @return An element structure. # @defreturn Element def close(self): try: self._parser.Parse("", 1) # end of data except self._error, v: self._raiseerror(v) tree = self.target.close() del self.target, self._parser # get rid of circular references return tree if sys.platform == 'cli': from . import SimpleXMLTreeBuilder XMLParser = SimpleXMLTreeBuilder.TreeBuilder # compatibility XMLTreeBuilder = XMLParser # workaround circular import. try: from ElementC14N import _serialize_c14n _serialize["c14n"] = _serialize_c14n except ImportError: pass
gpl-3.0
adamncasey/servo
tests/wpt/web-platform-tests/tools/manifest/sourcefile.py
25
18485
import hashlib import re import os from six import binary_type from six.moves.urllib.parse import urljoin from fnmatch import fnmatch try: from xml.etree import cElementTree as ElementTree except ImportError: from xml.etree import ElementTree import html5lib from . import XMLParser from .item import Stub, ManualTest, WebdriverSpecTest, RefTestNode, RefTest, TestharnessTest, SupportFile, ConformanceCheckerTest, VisualTest from .utils import rel_path_to_url, ContextManagerBytesIO, cached_property wd_pattern = "*.py" js_meta_re = re.compile(b"//\s*META:\s*(\w*)=(.*)$") python_meta_re = re.compile(b"#\s*META:\s*(\w*)=(.*)$") reference_file_re = re.compile(r'(^|[\-_])(not)?ref[0-9]*([\-_]|$)') def replace_end(s, old, new): """ Given a string `s` that ends with `old`, replace that occurrence of `old` with `new`. """ assert s.endswith(old) return s[:-len(old)] + new def read_script_metadata(f, regexp): """ Yields any metadata (pairs of bytestrings) from the file-like object `f`, as specified according to a supplied regexp. `regexp` - Regexp containing two groups containing the metadata name and value. """ for line in f: assert isinstance(line, binary_type), line m = regexp.match(line) if not m: break yield (m.groups()[0], m.groups()[1]) class SourceFile(object): parsers = {"html":lambda x:html5lib.parse(x, treebuilder="etree"), "xhtml":lambda x:ElementTree.parse(x, XMLParser.XMLParser()), "svg":lambda x:ElementTree.parse(x, XMLParser.XMLParser())} root_dir_non_test = set(["common", "work-in-progress"]) dir_non_test = set(["resources", "support", "tools"]) dir_path_non_test = {("css21", "archive"), ("css", "CSS2", "archive"), ("css", "common"), ("css", "work-in-progress")} def __init__(self, tests_root, rel_path, url_base, contents=None): """Object representing a file in a source tree. 
:param tests_root: Path to the root of the source tree :param rel_path: File path relative to tests_root :param url_base: Base URL used when converting file paths to urls :param contents: Byte array of the contents of the file or ``None``. """ self.tests_root = tests_root if os.name == "nt": # do slash normalization on Windows if isinstance(rel_path, binary_type): self.rel_path = rel_path.replace(b"/", b"\\") else: self.rel_path = rel_path.replace(u"/", u"\\") else: self.rel_path = rel_path self.url_base = url_base self.contents = contents self.dir_path, self.filename = os.path.split(self.rel_path) self.name, self.ext = os.path.splitext(self.filename) self.type_flag = None if "-" in self.name: self.type_flag = self.name.rsplit("-", 1)[1].split(".")[0] self.meta_flags = self.name.split(".")[1:] self.items_cache = None def __getstate__(self): # Remove computed properties if we pickle this class rv = self.__dict__.copy() if "__cached_properties__" in rv: cached_properties = rv["__cached_properties__"] for key in rv.keys(): if key in cached_properties: del rv[key] del rv["__cached_properties__"] return rv def name_prefix(self, prefix): """Check if the filename starts with a given prefix :param prefix: The prefix to check""" return self.name.startswith(prefix) def is_dir(self): """Return whether this file represents a directory.""" if self.contents is not None: return False return os.path.isdir(self.rel_path) def open(self): """ Return either * the contents specified in the constructor, if any; * a File object opened for reading the file contents. 
""" if self.contents is not None: file_obj = ContextManagerBytesIO(self.contents) else: file_obj = open(self.path, 'rb') return file_obj @cached_property def path(self): return os.path.join(self.tests_root, self.rel_path) @cached_property def url(self): return rel_path_to_url(self.rel_path, self.url_base) @cached_property def hash(self): with self.open() as f: return hashlib.sha1(f.read()).hexdigest() def in_non_test_dir(self): if self.dir_path == "": return True parts = self.dir_path.split(os.path.sep) if (parts[0] in self.root_dir_non_test or any(item in self.dir_non_test for item in parts) or any(parts[:len(path)] == list(path) for path in self.dir_path_non_test)): return True return False def in_conformance_checker_dir(self): return (self.dir_path == "conformance-checkers" or self.dir_path.startswith("conformance-checkers" + os.path.sep)) @property def name_is_non_test(self): """Check if the file name matches the conditions for the file to be a non-test file""" return (self.is_dir() or self.name_prefix("MANIFEST") or self.filename.startswith(".") or self.type_flag == "support" or self.in_non_test_dir()) @property def name_is_conformance(self): return (self.in_conformance_checker_dir() and self.type_flag in ("is-valid", "no-valid")) @property def name_is_conformance_support(self): return self.in_conformance_checker_dir() @property def name_is_stub(self): """Check if the file name matches the conditions for the file to be a stub file""" return self.name_prefix("stub-") @property def name_is_manual(self): """Check if the file name matches the conditions for the file to be a manual test file""" return self.type_flag == "manual" @property def name_is_visual(self): """Check if the file name matches the conditions for the file to be a visual test file""" return self.type_flag == "visual" @property def name_is_multi_global(self): """Check if the file name matches the conditions for the file to be a multi-global js test file""" return "any" in self.meta_flags and 
self.ext == ".js" @property def name_is_worker(self): """Check if the file name matches the conditions for the file to be a worker js test file""" return "worker" in self.meta_flags and self.ext == ".js" @property def name_is_window(self): """Check if the file name matches the conditions for the file to be a window js test file""" return "window" in self.meta_flags and self.ext == ".js" @property def name_is_webdriver(self): """Check if the file name matches the conditions for the file to be a webdriver spec test file""" # wdspec tests are in subdirectories of /webdriver excluding __init__.py # files. rel_dir_tree = self.rel_path.split(os.path.sep) return (rel_dir_tree[0] == "webdriver" and len(rel_dir_tree) > 1 and self.filename != "__init__.py" and fnmatch(self.filename, wd_pattern)) @property def name_is_reference(self): """Check if the file name matches the conditions for the file to be a reference file (not a reftest)""" return "/reference/" in self.url or "/reftest/" in self.url or bool(reference_file_re.search(self.name)) @property def markup_type(self): """Return the type of markup contained in a file, based on its extension, or None if it doesn't contain markup""" ext = self.ext if not ext: return None if ext[0] == ".": ext = ext[1:] if ext in ["html", "htm"]: return "html" if ext in ["xhtml", "xht", "xml"]: return "xhtml" if ext == "svg": return "svg" return None @cached_property def root(self): """Return an ElementTree Element for the root node of the file if it contains markup, or None if it does not""" if not self.markup_type: return None parser = self.parsers[self.markup_type] with self.open() as f: try: tree = parser(f) except Exception: return None if hasattr(tree, "getroot"): root = tree.getroot() else: root = tree return root @cached_property def timeout_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify timeouts""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='timeout']") 
@cached_property def script_metadata(self): if self.name_is_worker or self.name_is_multi_global or self.name_is_window: regexp = js_meta_re elif self.name_is_webdriver: regexp = python_meta_re else: return None with self.open() as f: return list(read_script_metadata(f, regexp)) @cached_property def timeout(self): """The timeout of a test or reference file. "long" if the file has an extended timeout or None otherwise""" if self.script_metadata: if any(m == (b"timeout", b"long") for m in self.script_metadata): return "long" if self.root is None: return None if self.timeout_nodes: timeout_str = self.timeout_nodes[0].attrib.get("content", None) if timeout_str and timeout_str.lower() == "long": return "long" return None @cached_property def viewport_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify viewport sizes""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='viewport-size']") @cached_property def viewport_size(self): """The viewport size of a test or reference file""" if self.root is None: return None if not self.viewport_nodes: return None return self.viewport_nodes[0].attrib.get("content", None) @cached_property def dpi_nodes(self): """List of ElementTree Elements corresponding to nodes in a test that specify device pixel ratios""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='device-pixel-ratio']") @cached_property def dpi(self): """The device pixel ratio of a test or reference file""" if self.root is None: return None if not self.dpi_nodes: return None return self.dpi_nodes[0].attrib.get("content", None) @cached_property def testharness_nodes(self): """List of ElementTree Elements corresponding to nodes representing a testharness.js script""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}script[@src='/resources/testharness.js']") @cached_property def content_is_testharness(self): """Boolean indicating whether the file content represents a testharness.js test""" 
if self.root is None: return None return bool(self.testharness_nodes) @cached_property def variant_nodes(self): """List of ElementTree Elements corresponding to nodes representing a test variant""" return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='variant']") @cached_property def test_variants(self): rv = [] for element in self.variant_nodes: if "content" in element.attrib: variant = element.attrib["content"] assert variant == "" or variant[0] in ["#", "?"] rv.append(variant) if not rv: rv = [""] return rv @cached_property def reftest_nodes(self): """List of ElementTree Elements corresponding to nodes representing a to a reftest <link>""" if self.root is None: return [] match_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='match']") mismatch_links = self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='mismatch']") return match_links + mismatch_links @cached_property def references(self): """List of (ref_url, relation) tuples for any reftest references specified in the file""" rv = [] rel_map = {"match": "==", "mismatch": "!="} for item in self.reftest_nodes: if "href" in item.attrib: ref_url = urljoin(self.url, item.attrib["href"]) ref_type = rel_map[item.attrib["rel"]] rv.append((ref_url, ref_type)) return rv @cached_property def content_is_ref_node(self): """Boolean indicating whether the file is a non-leaf node in a reftest graph (i.e. 
if it contains any <link rel=[mis]match>""" return bool(self.references) @cached_property def css_flag_nodes(self): """List of ElementTree Elements corresponding to nodes representing a flag <meta>""" if self.root is None: return [] return self.root.findall(".//{http://www.w3.org/1999/xhtml}meta[@name='flags']") @cached_property def css_flags(self): """Set of flags specified in the file""" rv = set() for item in self.css_flag_nodes: if "content" in item.attrib: for flag in item.attrib["content"].split(): rv.add(flag) return rv @cached_property def content_is_css_manual(self): """Boolean indicating whether the file content represents a CSS WG-style manual test""" if self.root is None: return None # return True if the intersection between the two sets is non-empty return bool(self.css_flags & {"animated", "font", "history", "interact", "paged", "speech", "userstyle"}) @cached_property def spec_link_nodes(self): """List of ElementTree Elements corresponding to nodes representing a <link rel=help>, used to point to specs""" if self.root is None: return [] return self.root.findall(".//{http://www.w3.org/1999/xhtml}link[@rel='help']") @cached_property def spec_links(self): """Set of spec links specified in the file""" rv = set() for item in self.spec_link_nodes: if "href" in item.attrib: rv.add(item.attrib["href"]) return rv @cached_property def content_is_css_visual(self): """Boolean indicating whether the file content represents a CSS WG-style manual test""" if self.root is None: return None return bool(self.ext in {'.xht', '.html', '.xhtml', '.htm', '.xml', '.svg'} and self.spec_links) @property def type(self): rv, _ = self.manifest_items() return rv def manifest_items(self): """List of manifest items corresponding to the file. 
There is typically one per test, but in the case of reftests a node may have corresponding manifest items without being a test itself.""" if self.items_cache: return self.items_cache if self.name_is_non_test: rv = "support", [SupportFile(self)] elif self.name_is_stub: rv = Stub.item_type, [Stub(self, self.url)] elif self.name_is_manual: rv = ManualTest.item_type, [ManualTest(self, self.url)] elif self.name_is_conformance: rv = ConformanceCheckerTest.item_type, [ConformanceCheckerTest(self, self.url)] elif self.name_is_conformance_support: rv = "support", [SupportFile(self)] elif self.name_is_visual: rv = VisualTest.item_type, [VisualTest(self, self.url)] elif self.name_is_multi_global: rv = TestharnessTest.item_type, [ TestharnessTest(self, replace_end(self.url, ".any.js", ".any.html"), timeout=self.timeout), TestharnessTest(self, replace_end(self.url, ".any.js", ".any.worker.html"), timeout=self.timeout), ] elif self.name_is_worker: rv = (TestharnessTest.item_type, [TestharnessTest(self, replace_end(self.url, ".worker.js", ".worker.html"), timeout=self.timeout)]) elif self.name_is_window: rv = (TestharnessTest.item_type, [TestharnessTest(self, replace_end(self.url, ".window.js", ".window.html"), timeout=self.timeout)]) elif self.name_is_webdriver: rv = WebdriverSpecTest.item_type, [WebdriverSpecTest(self, self.url, timeout=self.timeout)] elif self.content_is_css_manual and not self.name_is_reference: rv = ManualTest.item_type, [ManualTest(self, self.url)] elif self.content_is_testharness: rv = TestharnessTest.item_type, [] for variant in self.test_variants: url = self.url + variant rv[1].append(TestharnessTest(self, url, timeout=self.timeout)) elif self.content_is_ref_node: rv = (RefTestNode.item_type, [RefTestNode(self, self.url, self.references, timeout=self.timeout, viewport_size=self.viewport_size, dpi=self.dpi)]) elif self.content_is_css_visual and not self.name_is_reference: rv = VisualTest.item_type, [VisualTest(self, self.url)] else: rv = "support", 
[SupportFile(self)] self.items_cache = rv return rv
mpl-2.0
ffu/DSA-3.2.2
gr-usrp2/src/qa_usrp2.py
5
1237
#!/usr/bin/env python # # Copyright 2005,2008 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import usrp2 class qa_usrp2(gr_unittest.TestCase): def setUp(self): self.tb = gr.top_block() def tearDown(self): self.tb = None def test_000_nop (self): """Just see if we can import the module... They may not have a USRP2 connected, etc. Don't try to run anything""" pass if __name__ == '__main__': gr_unittest.main ()
gpl-3.0
QLGu/django-oscar
tests/functional/dashboard/order_tests.py
22
12420
from django.conf import settings from django.core.urlresolvers import reverse from django.utils.six.moves import http_client from oscar.core.loading import get_model from oscar.apps.order.models import ( Order, OrderNote, PaymentEvent, PaymentEventType) from oscar.test.factories import PartnerFactory, ShippingAddressFactory from oscar.test.factories import create_order, create_basket from oscar.test.testcases import WebTestCase from oscar.test.factories import SourceTypeFactory Basket = get_model('basket', 'Basket') Partner = get_model('partner', 'Partner') ShippingAddress = get_model('order', 'ShippingAddress') class TestOrderListDashboard(WebTestCase): is_staff = True def test_redirects_to_detail_page(self): order = create_order() page = self.get(reverse('dashboard:order-list')) form = page.forms['search_form'] form['order_number'] = order.number response = form.submit() self.assertEqual(http_client.FOUND, response.status_code) def test_downloads_to_csv_without_error(self): address = ShippingAddressFactory() create_order(shipping_address=address) page = self.get(reverse('dashboard:order-list')) form = page.forms['orders_form'] form['selected_order'].checked = True form.submit('download_selected') def test_allows_order_number_search(self): page = self.get(reverse('dashboard:order-list')) form = page.forms['search_form'] form['order_number'] = '+' form.submit() class PermissionBasedDashboardOrderTestsBase(WebTestCase): permissions = ['partner.dashboard_access', ] username = 'user1@example.com' def setUp(self): """ Creates two orders. order_in has self.user in it's partner users list. 
""" super(PermissionBasedDashboardOrderTestsBase, self).setUp() self.address = ShippingAddressFactory() self.basket_in = create_basket() self.basket_out = create_basket() # replace partner with one that has the user in it's users list self.partner_in = PartnerFactory(users=[self.user]) stockrecord = self.basket_in.lines.all()[0].stockrecord stockrecord.partner = self.partner_in stockrecord.save() self.order_in = create_order(basket=self.basket_in, shipping_address=self.address) self.order_out = create_order(basket=self.basket_out, shipping_address=self.address) class PermissionBasedDashboardOrderTestsNoStaff(PermissionBasedDashboardOrderTestsBase): is_staff = False def test_non_staff_can_only_list_her_orders(self): # order-list user1 response = self.get(reverse('dashboard:order-list')) self.assertEqual(set(response.context['orders']), set([self.order_in])) # order-detail user2 url = reverse('dashboard:order-detail', kwargs={'number': self.order_in.number}) self.assertIsOk(self.get(url)) url = reverse('dashboard:order-detail', kwargs={'number': self.order_out.number}) self.assertNoAccess(self.get(url, status="*")) # order-line-detail user2 url = reverse('dashboard:order-line-detail', kwargs={'number': self.order_in.number, 'line_id': self.order_in.lines.all()[0].pk}) self.assertIsOk(self.get(url)) url = reverse('dashboard:order-line-detail', kwargs={'number': self.order_out.number, 'line_id': self.order_out.lines.all()[0].pk}) self.assertNoAccess(self.get(url, status="*")) # order-shipping-address url = reverse('dashboard:order-shipping-address', kwargs={'number': self.order_in.number}) self.assertIsOk(self.get(url)) url = reverse('dashboard:order-shipping-address', kwargs={'number': self.order_out.number}) self.assertNoAccess(self.get(url, status="*")) class PermissionBasedDashboardOrderTestsStaff(PermissionBasedDashboardOrderTestsBase): is_staff = True def test_staff_user_can_list_all_orders(self): orders = [self.order_in, self.order_out] # order-list response = 
self.get(reverse('dashboard:order-list')) self.assertIsOk(response) self.assertEqual(set(response.context['orders']), set(orders)) # order-detail for order in orders: url = reverse('dashboard:order-detail', kwargs={'number': order.number}) self.assertIsOk(self.get(url)) class TestOrderListSearch(WebTestCase): is_staff = True TEST_CASES = [ ({}, []), ( {'order_number': 'abcd1234'}, ['Order number starts with "abcd1234"'] ), ( {'name': 'Bob Smith'}, ['Customer name matches "Bob Smith"'] ), ( {'product_title': 'The Art of War'}, ['Product name matches "The Art of War"'] ), ( {'upc': 'abcd1234'}, ['Includes an item with UPC "abcd1234"'] ), ( {'partner_sku': 'abcd1234'}, ['Includes an item with partner SKU "abcd1234"'] ), ( {'date_from': '2015-01-01'}, ['Placed after 2015-01-01'] ), ( {'date_to': '2015-01-01'}, ['Placed before 2015-01-02'] ), ( {'date_from': '2014-01-02', 'date_to': '2015-03-04'}, ['Placed between 2014-01-02 and 2015-03-04'] ), ( {'voucher': 'abcd1234'}, ['Used voucher code "abcd1234"'] ), ( {'payment_method': 'visa'}, ['Paid using Visa'] ), ( # Assumes that the test settings (OSCAR_ORDER_STATUS_PIPELINE) # include a state called 'A' {'status': 'A'}, ['Order status is A'] ), ( { 'name': 'Bob Smith', 'product_title': 'The Art of War', 'upc': 'upc_abcd1234', 'partner_sku': 'partner_avcd1234', 'date_from': '2014-01-02', 'date_to': '2015-03-04', 'voucher': 'voucher_abcd1234', 'payment_method': 'visa', 'status': 'A' }, [ 'Customer name matches "Bob Smith"', 'Product name matches "The Art of War"', 'Includes an item with UPC "upc_abcd1234"', 'Includes an item with partner SKU "partner_avcd1234"', 'Placed between 2014-01-02 and 2015-03-04', 'Used voucher code "voucher_abcd1234"', 'Paid using Visa', 'Order status is A', ] ), ] def test_search_filter_descriptions(self): SourceTypeFactory(name='Visa', code='visa') url = reverse('dashboard:order-list') for params, expected_filters in self.TEST_CASES: # Need to provide the order number parameter to avoid # being 
short-circuited to "all results". params.setdefault('order_number', '') response = self.get(url, params=params) self.assertEqual(response.status_code, 200) applied_filters = [ el.text.strip() for el in response.html.select('.search-filter-list .label') ] assert applied_filters == expected_filters class TestOrderDetailPage(WebTestCase): is_staff = True def setUp(self): super(TestOrderDetailPage, self).setUp() # ensures that initial statuses are as expected self.order = create_order() self.event_type = PaymentEventType.objects.create(name='foo') url = reverse('dashboard:order-detail', kwargs={'number': self.order.number}) self.page = self.get(url) def test_contains_order(self): self.assertEqual(self.page.context['order'], self.order) def test_allows_notes_to_be_added(self): form = self.page.forms['order_note_form'] form['message'] = "boom" response = form.submit() self.assertIsRedirect(response) notes = self.order.notes.all() self.assertEqual(1, len(notes)) def test_allows_line_status_to_be_changed(self): line = self.order.lines.all()[0] self.assertEqual(line.status, settings.OSCAR_INITIAL_LINE_STATUS) form = self.page.forms['order_lines_form'] form['line_action'] = 'change_line_statuses' form['new_status'] = new_status = 'b' form['selected_line'] = [line.pk] form.submit() # fetch line again self.assertEqual(self.order.lines.all()[0].status, new_status) def test_allows_order_status_to_be_changed(self): form = self.page.forms['order_status_form'] self.assertEqual( self.order.status, settings.OSCAR_INITIAL_ORDER_STATUS) form = self.page.forms['order_status_form'] form['new_status'] = new_status = 'B' form.submit() # fetch order again self.assertEqual(Order.objects.get(pk=self.order.pk).status, new_status) def test_allows_creating_payment_event(self): line = self.order.lines.all()[0] form = self.page.forms['order_lines_form'] form['line_action'] = 'create_payment_event' form['selected_line'] = [line.pk] form['payment_event_type'] = self.event_type.code form.submit() 
self.assertTrue(PaymentEvent.objects.exists()) class TestChangingOrderStatus(WebTestCase): is_staff = True def setUp(self): super(TestChangingOrderStatus, self).setUp() Order.pipeline = {'A': ('B', 'C')} self.order = create_order(status='A') url = reverse('dashboard:order-detail', kwargs={'number': self.order.number}) page = self.get(url) form = page.forms['order_status_form'] form['new_status'] = 'B' self.response = form.submit() def reload_order(self): return Order.objects.get(number=self.order.number) def test_works(self): self.assertIsRedirect(self.response) self.assertEqual('B', self.reload_order().status) def test_creates_system_note(self): notes = self.order.notes.all() self.assertEqual(1, len(notes)) self.assertEqual(OrderNote.SYSTEM, notes[0].note_type) class TestChangingOrderStatusFromFormOnOrderListView(WebTestCase): is_staff = True def setUp(self): super(TestChangingOrderStatusFromFormOnOrderListView, self).setUp() Order.pipeline = {'A': ('B', 'C'), 'B': ('A', 'C'), 'C': ('A', 'B')} self.order = create_order(status='A') url = reverse('dashboard:order-list') page = self.get(url) form = page.forms['orders_form'] form['new_status'] = 'B' form['selected_order'] = self.order.pk self.response = form.submit(name='action', value='change_order_statuses') def reload_order(self): return Order.objects.get(number=self.order.number) def test_works(self): self.assertIsRedirect(self.response) # Has the order status been changed? self.assertEqual('B', self.reload_order().status) # Is a system note created? 
notes = self.order.notes.all() self.assertEqual(1, len(notes)) self.assertEqual(OrderNote.SYSTEM, notes[0].note_type) class LineDetailTests(WebTestCase): is_staff = True def setUp(self): self.order = create_order() self.line = self.order.lines.all()[0] self.url = reverse('dashboard:order-line-detail', kwargs={'number': self.order.number, 'line_id': self.line.id}) super(LineDetailTests, self).setUp() def test_line_detail_page_exists(self): response = self.get(self.url) self.assertIsOk(response) def test_line_in_context(self): response = self.get(self.url) self.assertInContext(response, 'line')
bsd-3-clause
keithlee/shakeAppPyDev
django/views/generic/edit.py
159
7457
from django.forms import models as model_forms from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponseRedirect from django.views.generic.base import TemplateResponseMixin, View from django.views.generic.detail import (SingleObjectMixin, SingleObjectTemplateResponseMixin, BaseDetailView) class FormMixin(object): """ A mixin that provides a way to show and handle a form in a request. """ initial = {} form_class = None success_url = None def get_initial(self): """ Returns the initial data to use for forms on this view. """ return self.initial def get_form_class(self): """ Returns the form class to use in this view """ return self.form_class def get_form(self, form_class): """ Returns an instance of the form to be used in this view. """ return form_class(**self.get_form_kwargs()) def get_form_kwargs(self): """ Returns the keyword arguments for instanciating the form. """ kwargs = {'initial': self.get_initial()} if self.request.method in ('POST', 'PUT'): kwargs.update({ 'data': self.request.POST, 'files': self.request.FILES, }) return kwargs def get_context_data(self, **kwargs): return kwargs def get_success_url(self): if self.success_url: url = self.success_url else: raise ImproperlyConfigured( "No URL to redirect to. Provide a success_url.") return url def form_valid(self, form): return HttpResponseRedirect(self.get_success_url()) def form_invalid(self, form): return self.render_to_response(self.get_context_data(form=form)) class ModelFormMixin(FormMixin, SingleObjectMixin): """ A mixin that provides a way to show and handle a modelform in a request. 
""" def get_form_class(self): """ Returns the form class to use in this view """ if self.form_class: return self.form_class else: if self.model is not None: # If a model has been explicitly provided, use it model = self.model elif hasattr(self, 'object') and self.object is not None: # If this view is operating on a single object, use # the class of that object model = self.object.__class__ else: # Try to get a queryset and extract the model class # from that model = self.get_queryset().model return model_forms.modelform_factory(model) def get_form_kwargs(self): """ Returns the keyword arguments for instanciating the form. """ kwargs = super(ModelFormMixin, self).get_form_kwargs() kwargs.update({'instance': self.object}) return kwargs def get_success_url(self): if self.success_url: url = self.success_url % self.object.__dict__ else: try: url = self.object.get_absolute_url() except AttributeError: raise ImproperlyConfigured( "No URL to redirect to. Either provide a url or define" " a get_absolute_url method on the Model.") return url def form_valid(self, form): self.object = form.save() return super(ModelFormMixin, self).form_valid(form) def get_context_data(self, **kwargs): context = kwargs if self.object: context['object'] = self.object context_object_name = self.get_context_object_name(self.object) if context_object_name: context[context_object_name] = self.object return context class ProcessFormView(View): """ A mixin that processes a form on POST. """ def get(self, request, *args, **kwargs): form_class = self.get_form_class() form = self.get_form(form_class) return self.render_to_response(self.get_context_data(form=form)) def post(self, request, *args, **kwargs): form_class = self.get_form_class() form = self.get_form(form_class) if form.is_valid(): return self.form_valid(form) else: return self.form_invalid(form) # PUT is a valid HTTP verb for creating (with a known URL) or editing an # object, note that browsers only support POST for now. 
def put(self, *args, **kwargs): return self.post(*args, **kwargs) class BaseFormView(FormMixin, ProcessFormView): """ A base view for displaying a form """ class FormView(TemplateResponseMixin, BaseFormView): """ A view for displaying a form, and rendering a template response. """ class BaseCreateView(ModelFormMixin, ProcessFormView): """ Base view for creating an new object instance. Using this base class requires subclassing to provide a response mixin. """ def get(self, request, *args, **kwargs): self.object = None return super(BaseCreateView, self).get(request, *args, **kwargs) def post(self, request, *args, **kwargs): self.object = None return super(BaseCreateView, self).post(request, *args, **kwargs) class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView): """ View for creating an new object instance, with a response rendered by template. """ template_name_suffix = '_form' class BaseUpdateView(ModelFormMixin, ProcessFormView): """ Base view for updating an existing object. Using this base class requires subclassing to provide a response mixin. """ def get(self, request, *args, **kwargs): self.object = self.get_object() return super(BaseUpdateView, self).get(request, *args, **kwargs) def post(self, request, *args, **kwargs): self.object = self.get_object() return super(BaseUpdateView, self).post(request, *args, **kwargs) class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView): """ View for updating an object, with a response rendered by template.. """ template_name_suffix = '_form' class DeletionMixin(object): """ A mixin providing the ability to delete objects """ success_url = None def delete(self, request, *args, **kwargs): self.object = self.get_object() self.object.delete() return HttpResponseRedirect(self.get_success_url()) # Add support for browsers which only accept GET and POST for now. 
def post(self, *args, **kwargs): return self.delete(*args, **kwargs) def get_success_url(self): if self.success_url: return self.success_url else: raise ImproperlyConfigured( "No URL to redirect to. Provide a success_url.") class BaseDeleteView(DeletionMixin, BaseDetailView): """ Base view for deleting an object. Using this base class requires subclassing to provide a response mixin. """ class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView): """ View for deleting an object retrieved with `self.get_object()`, with a response rendered by template. """ template_name_suffix = '_confirm_delete'
bsd-3-clause
hdinsight/hue
desktop/core/ext-py/boto-2.38.0/boto/file/simpleresultset.py
264
1321
# Copyright 2010 Google Inc. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class SimpleResultSet(list): """ ResultSet facade built from a simple list, rather than via XML parsing. """ def __init__(self, input_list): for x in input_list: self.append(x) self.is_truncated = False
apache-2.0
mitodl/odl-video-service
odl_video/urls.py
1
1110
"""odl_video URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin urlpatterns = [ url(r"^admin/", admin.site.urls), url(r"^status/", include("server_status.urls")), url(r"^", include("ui.urls")), url(r"^", include("cloudsync.urls")), url(r"^hijack/", include("hijack.urls", namespace="hijack")), ] handler403 = "ui.views.permission_denied_403_view" handler404 = "ui.views.page_not_found_404_view" handler500 = "ui.views.error_500_view"
bsd-3-clause
akazakov/ansible-modules-core
cloud/openstack/os_subnet.py
21
11273
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <benno@ansible.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: import shade HAS_SHADE = True except ImportError: HAS_SHADE = False DOCUMENTATION = ''' --- module: os_subnet short_description: Add/Remove subnet to an OpenStack network extends_documentation_fragment: openstack version_added: "2.0" author: "Monty Taylor (@emonty)" description: - Add or Remove a subnet to an OpenStack network options: state: description: - Indicate desired state of the resource choices: ['present', 'absent'] required: false default: present network_name: description: - Name of the network to which the subnet should be attached required: true when state is 'present' name: description: - The name of the subnet that should be created. Although Neutron allows for non-unique subnet names, this module enforces subnet name uniqueness. required: true cidr: description: - The CIDR representation of the subnet that should be assigned to the subnet. required: true when state is 'present' default: None ip_version: description: - The IP version of the subnet 4 or 6 required: false default: 4 enable_dhcp: description: - Whether DHCP should be enabled for this subnet. 
required: false default: true gateway_ip: description: - The ip that would be assigned to the gateway for this subnet required: false default: None dns_nameservers: description: - List of DNS nameservers for this subnet. required: false default: None allocation_pool_start: description: - From the subnet pool the starting address from which the IP should be allocated. required: false default: None allocation_pool_end: description: - From the subnet pool the last IP that should be assigned to the virtual machines. required: false default: None host_routes: description: - A list of host route dictionaries for the subnet. required: false default: None ipv6_ra_mode: description: - IPv6 router advertisement mode choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] required: false default: None ipv6_address_mode: description: - IPv6 address mode choices: ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] required: false default: None requirements: - "python >= 2.6" - "shade" ''' EXAMPLES = ''' # Create a new (or update an existing) subnet on the specified network - os_subnet: state: present network_name: network1 name: net1subnet cidr: 192.168.0.0/24 dns_nameservers: - 8.8.8.7 - 8.8.8.8 host_routes: - destination: 0.0.0.0/0 nexthop: 12.34.56.78 - destination: 192.168.0.0/24 nexthop: 192.168.0.1 # Delete a subnet - os_subnet: state: absent name: net1subnet # Create an ipv6 stateless subnet - os_subnet: state: present name: intv6 network_name: internal ip_version: 6 cidr: 2db8:1::/64 dns_nameservers: - 2001:4860:4860::8888 - 2001:4860:4860::8844 ipv6_ra_mode: dhcpv6-stateless ipv6_address_mode: dhcpv6-stateless ''' def _can_update(subnet, module, cloud): """Check for differences in non-updatable values""" network_name = module.params['network_name'] cidr = module.params['cidr'] ip_version = int(module.params['ip_version']) ipv6_ra_mode = module.params['ipv6_ra_mode'] ipv6_a_mode = module.params['ipv6_address_mode'] if network_name: network = 
cloud.get_network(network_name) if network: netid = network['id'] else: module.fail_json(msg='No network found for %s' % network_name) if netid != subnet['network_id']: module.fail_json(msg='Cannot update network_name in existing \ subnet') if ip_version and subnet['ip_version'] != ip_version: module.fail_json(msg='Cannot update ip_version in existing subnet') if ipv6_ra_mode and subnet.get('ipv6_ra_mode', None) != ip_version: module.fail_json(msg='Cannot update ipv6_ra_mode in existing subnet') if ipv6_a_mode and subnet.get('ipv6_address_mode', None) != ipv6_a_mode: module.fail_json(msg='Cannot update ipv6_address_mode in existing \ subnet') def _needs_update(subnet, module, cloud): """Check for differences in the updatable values.""" # First check if we are trying to update something we're not allowed to _can_update(subnet, module, cloud) # now check for the things we are allowed to update enable_dhcp = module.params['enable_dhcp'] subnet_name = module.params['name'] pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] gateway_ip = module.params['gateway_ip'] dns = module.params['dns_nameservers'] host_routes = module.params['host_routes'] curr_pool = subnet['allocation_pools'][0] if subnet['enable_dhcp'] != enable_dhcp: return True if subnet_name and subnet['name'] != subnet_name: return True if pool_start and curr_pool['start'] != pool_start: return True if pool_end and curr_pool['end'] != pool_end: return True if gateway_ip and subnet['gateway_ip'] != gateway_ip: return True if dns and sorted(subnet['dns_nameservers']) != sorted(dns): return True if host_routes: curr_hr = sorted(subnet['host_routes'], key=lambda t: t.keys()) new_hr = sorted(host_routes, key=lambda t: t.keys()) if sorted(curr_hr) != sorted(new_hr): return True return False def _system_state_change(module, subnet, cloud): state = module.params['state'] if state == 'present': if not subnet: return True return _needs_update(subnet, module, cloud) if 
state == 'absent' and subnet: return True return False def main(): ipv6_mode_choices = ['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'] argument_spec = openstack_full_argument_spec( name=dict(required=True), network_name=dict(default=None), cidr=dict(default=None), ip_version=dict(default='4', choices=['4', '6']), enable_dhcp=dict(default='true', type='bool'), gateway_ip=dict(default=None), dns_nameservers=dict(default=None, type='list'), allocation_pool_start=dict(default=None), allocation_pool_end=dict(default=None), host_routes=dict(default=None, type='list'), ipv6_ra_mode=dict(default=None, choice=ipv6_mode_choices), ipv6_address_mode=dict(default=None, choice=ipv6_mode_choices), state=dict(default='present', choices=['absent', 'present']), ) module_kwargs = openstack_module_kwargs() module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') state = module.params['state'] network_name = module.params['network_name'] cidr = module.params['cidr'] ip_version = module.params['ip_version'] enable_dhcp = module.params['enable_dhcp'] subnet_name = module.params['name'] gateway_ip = module.params['gateway_ip'] dns = module.params['dns_nameservers'] pool_start = module.params['allocation_pool_start'] pool_end = module.params['allocation_pool_end'] host_routes = module.params['host_routes'] ipv6_ra_mode = module.params['ipv6_ra_mode'] ipv6_a_mode = module.params['ipv6_address_mode'] # Check for required parameters when state == 'present' if state == 'present': for p in ['network_name', 'cidr']: if not module.params[p]: module.fail_json(msg='%s required with present state' % p) if pool_start and pool_end: pool = [dict(start=pool_start, end=pool_end)] elif pool_start or pool_end: module.fail_json(msg='allocation pool requires start and end values') else: pool = None try: cloud = shade.openstack_cloud(**module.params) subnet = cloud.get_subnet(subnet_name) if module.check_mode: 
module.exit_json(changed=_system_state_change(module, subnet, cloud)) if state == 'present': if not subnet: subnet = cloud.create_subnet(network_name, cidr, ip_version=ip_version, enable_dhcp=enable_dhcp, subnet_name=subnet_name, gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes, ipv6_ra_mode=ipv6_ra_mode, ipv6_address_mode=ipv6_a_mode) changed = True else: if _needs_update(subnet, module, cloud): cloud.update_subnet(subnet['id'], subnet_name=subnet_name, enable_dhcp=enable_dhcp, gateway_ip=gateway_ip, dns_nameservers=dns, allocation_pools=pool, host_routes=host_routes) changed = True else: changed = False module.exit_json(changed=changed, subnet=subnet, id=subnet['id']) elif state == 'absent': if not subnet: changed = False else: changed = True cloud.delete_subnet(subnet_name) module.exit_json(changed=changed) except shade.OpenStackCloudException as e: module.fail_json(msg=e.message) # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
deuxpi/pytrainer
imports/file_gpxplus.py
1
3935
# -*- coding: iso-8859-1 -*- #Copyright (C) Fiz Vazquez vud1@sindominio.net # Modified by dgranda #This program is free software; you can redistribute it and/or #modify it under the terms of the GNU General Public License #as published by the Free Software Foundation; either version 2 #of the License, or (at your option) any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. import logging import os #import StringIO from lxml import etree from pytrainer.lib.date import Date class gpxplus(): def __init__(self, parent = None, data_path = None): self.parent = parent self.pytrainer_main = parent.parent self.tmpdir = self.pytrainer_main.profile.tmpdir self.main_data_path = data_path self.data_path = os.path.dirname(__file__) self.xmldoc = None self.activitiesSummary = [] def getXmldoc(self): ''' Function to return parsed xmlfile ''' return self.xmldoc def getFileType(self): return _("GPS eXchange file") def getActivitiesSummary(self): return self.activitiesSummary def testFile(self, filename): logging.debug('>>') logging.debug("Testing " + filename) #Check if file is a GPX try: #parse as xml xmldoc = etree.parse(filename) #Parse XML schema xmlschema_doc = etree.parse(self.main_data_path+"schemas/Topografix_gpx11.xsd") xmlschema = etree.XMLSchema(xmlschema_doc) if (xmlschema.validate(xmldoc)): #Valid gpx file self.xmldoc = xmldoc startTime = self.getDateTime(self.startTimeFromFile(xmldoc)) indatabase = self.inDatabase(xmldoc, startTime) sport = self.getSport(xmldoc) duration = self.getDetails(xmldoc, startTime) distance = "" self.activitiesSummary.append( (0, indatabase, 
startTime[1].strftime("%Y-%m-%dT%H:%M:%S"), distance , str(duration), sport, ) ) return True except: #Not gpx file logging.debug("Traceback: %s" % traceback.format_exc()) return False return False def getDateTime(self, time_): return Date().getDateTime(time_) def inDatabase(self, tree, startTime): #comparing date and start time (sport may have been changed in DB after import) time = startTime if time is None: return False time = time[0].strftime("%Y-%m-%dT%H:%M:%SZ") if self.parent.parent.ddbb.select("records","*","date_time_utc=\"%s\"" % (time)): return True else: return False def getDetails(self, tree, startTime): root = tree.getroot() #Get all times from file times = root.findall(".//{http://www.topografix.com/GPX/1/1}time") time = times[-1].text return self.getDateTime(time)[0]-startTime[0] def getSport(self, tree): #No sport in GPX file return None def startTimeFromFile(self, tree): """ Function to return the first time element from a GPX 1.1 file (skipping not mandatory metadata section) """ root = tree.getroot() timeElement = root.xpath(".//g:time[not(parent::g:metadata)]", namespaces={'g':'http://www.topografix.com/GPX/1/1'})[0] if timeElement is not None: return timeElement.text return None def getGPXFile(self, ID, file_id): """ Generate GPX file based on activity ID Returns (sport, GPX filename) """ sport = None gpxFile = None if ID == "0": #Only one activity in file gpxFile = "%s/gpx-%s-%s.gpx" % (self.tmpdir, file_id, ID) sport = self.getSport(self.xmldoc) self.createGPXfile(gpxFile, self.xmldoc) return sport, gpxFile def createGPXfile(self, gpxfile, tree): tree.write(gpxfile, xml_declaration=True, encoding='UTF-8')
gpl-2.0
maciejkula/scipy
scipy/sparse/linalg/tests/test_matfuncs.py
3
18818
#!/usr/bin/env python # # Created by: Pearu Peterson, March 2002 # """ Test functions for scipy.linalg.matfuncs module """ from __future__ import division, print_function, absolute_import import math import warnings import numpy as np from numpy import array, eye, exp, random from numpy.linalg import matrix_power from numpy.testing import (TestCase, run_module_suite, assert_allclose, assert_, assert_array_almost_equal, assert_equal, assert_array_almost_equal_nulp) from scipy.sparse import csc_matrix, SparseEfficiencyWarning from scipy.sparse.construct import eye as speye from scipy.sparse.linalg.matfuncs import (expm, ProductOperator, MatrixPowerOperator, _onenorm_matrix_power_nnm) from scipy.linalg import logm from scipy.special import factorial import scipy.sparse import scipy.sparse.linalg def _burkardt_13_power(n, p): """ A helper function for testing matrix functions. Parameters ---------- n : integer greater than 1 Order of the square matrix to be returned. p : non-negative integer Power of the matrix. Returns ------- out : ndarray representing a square matrix A Forsythe matrix of order n, raised to the power p. """ # Input validation. if n != int(n) or n < 2: raise ValueError('n must be an integer greater than 1') n = int(n) if p != int(p) or p < 0: raise ValueError('p must be a non-negative integer') p = int(p) # Construct the matrix explicitly. 
a, b = divmod(p, n) large = np.power(10.0, -n*a) small = large * np.power(10.0, -n) return np.diag([large]*(n-b), b) + np.diag([small]*b, b-n) def test_onenorm_matrix_power_nnm(): np.random.seed(1234) for n in range(1, 5): for p in range(5): M = np.random.random((n, n)) Mp = np.linalg.matrix_power(M, p) observed = _onenorm_matrix_power_nnm(M, p) expected = np.linalg.norm(Mp, 1) assert_allclose(observed, expected) class TestExpM(TestCase): def test_zero_ndarray(self): a = array([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) def test_zero_sparse(self): a = csc_matrix([[0.,0],[0,0]]) assert_array_almost_equal(expm(a).toarray(),[[1,0],[0,1]]) def test_zero_matrix(self): a = np.matrix([[0.,0],[0,0]]) assert_array_almost_equal(expm(a),[[1,0],[0,1]]) def test_misc_types(self): A = expm(np.array([[1]])) yield assert_allclose, expm(((1,),)), A yield assert_allclose, expm([[1]]), A yield assert_allclose, expm(np.matrix([[1]])), A yield assert_allclose, expm(np.array([[1]])), A yield assert_allclose, expm(csc_matrix([[1]])), A B = expm(np.array([[1j]])) yield assert_allclose, expm(((1j,),)), B yield assert_allclose, expm([[1j]]), B yield assert_allclose, expm(np.matrix([[1j]])), B yield assert_allclose, expm(csc_matrix([[1j]])), B def test_bidiagonal_sparse(self): A = csc_matrix([ [1, 3, 0], [0, 1, 5], [0, 0, 2]], dtype=float) e1 = math.exp(1) e2 = math.exp(2) expected = np.array([ [e1, 3*e1, 15*(e2 - 2*e1)], [0, e1, 5*(e2 - e1)], [0, 0, e2]], dtype=float) observed = expm(A).toarray() assert_array_almost_equal(observed, expected) def test_padecases_dtype_float(self): for dtype in [np.float32, np.float64]: for scale in [1e-2, 1e-1, 5e-1, 1, 10]: A = scale * eye(3, dtype=dtype) observed = expm(A) expected = exp(scale) * eye(3, dtype=dtype) assert_array_almost_equal_nulp(observed, expected, nulp=100) def test_padecases_dtype_complex(self): for dtype in [np.complex64, np.complex128]: for scale in [1e-2, 1e-1, 5e-1, 1, 10]: A = scale * eye(3, dtype=dtype) 
observed = expm(A) expected = exp(scale) * eye(3, dtype=dtype) assert_array_almost_equal_nulp(observed, expected, nulp=100) def test_padecases_dtype_sparse_float(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.float64 with warnings.catch_warnings(): warnings.simplefilter("ignore", category=SparseEfficiencyWarning) for scale in [1e-2, 1e-1, 5e-1, 1, 10]: a = scale * speye(3, 3, dtype=dtype, format='csc') e = exp(scale) * eye(3, dtype=dtype) assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) def test_padecases_dtype_sparse_complex(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.complex128 with warnings.catch_warnings(): warnings.simplefilter("ignore", category=SparseEfficiencyWarning) for scale in [1e-2, 1e-1, 5e-1, 1, 10]: a = scale * speye(3, 3, dtype=dtype, format='csc') e = exp(scale) * eye(3, dtype=dtype) assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) def test_logm_consistency(self): random.seed(1234) for dtype in [np.float64, np.complex128]: for n in range(1, 10): for scale in [1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2]: # make logm(A) be of a given scale A = (eye(n) + random.rand(n, n) * scale).astype(dtype) if np.iscomplexobj(A): A = A + 1j * random.rand(n, n) * scale assert_array_almost_equal(expm(logm(A)), A) def test_integer_matrix(self): Q = np.array([ [-3, 1, 1, 1], [1, -3, 1, 1], [1, 1, -3, 1], [1, 1, 1, -3]]) assert_allclose(expm(Q), expm(1.0 * Q)) def test_triangularity_perturbation(self): # Experiment (1) of # Awad H. Al-Mohy and Nicholas J. Higham (2012) # Improved Inverse Scaling and Squaring Algorithms # for the Matrix Logarithm. 
A = np.array([ [3.2346e-1, 3e4, 3e4, 3e4], [0, 3.0089e-1, 3e4, 3e4], [0, 0, 3.221e-1, 3e4], [0, 0, 0, 3.0744e-1]], dtype=float) A_logm = np.array([ [-1.12867982029050462e+00, 9.61418377142025565e+04, -4.52485573953179264e+09, 2.92496941103871812e+14], [0.00000000000000000e+00, -1.20101052953082288e+00, 9.63469687211303099e+04, -4.68104828911105442e+09], [0.00000000000000000e+00, 0.00000000000000000e+00, -1.13289322264498393e+00, 9.53249183094775653e+04], [0.00000000000000000e+00, 0.00000000000000000e+00, 0.00000000000000000e+00, -1.17947533272554850e+00]], dtype=float) assert_allclose(expm(A_logm), A, rtol=1e-4) # Perturb the upper triangular matrix by tiny amounts, # so that it becomes technically not upper triangular. random.seed(1234) tiny = 1e-17 A_logm_perturbed = A_logm.copy() A_logm_perturbed[1, 0] = tiny A_expm_logm_perturbed = expm(A_logm_perturbed) rtol = 1e-4 atol = 100 * tiny assert_(not np.allclose(A_expm_logm_perturbed, A, rtol=rtol, atol=atol)) def test_burkardt_1(self): # This matrix is diagonal. # The calculation of the matrix exponential is simple. # # This is the first of a series of matrix exponential tests # collected by John Burkardt from the following sources. # # Alan Laub, # Review of "Linear System Theory" by Joao Hespanha, # SIAM Review, # Volume 52, Number 4, December 2010, pages 779--781. # # Cleve Moler and Charles Van Loan, # Nineteen Dubious Ways to Compute the Exponential of a Matrix, # Twenty-Five Years Later, # SIAM Review, # Volume 45, Number 1, March 2003, pages 3--49. # # Cleve Moler, # Cleve's Corner: A Balancing Act for the Matrix Exponential, # 23 July 2012. # # Robert Ward, # Numerical computation of the matrix exponential # with accuracy estimate, # SIAM Journal on Numerical Analysis, # Volume 14, Number 4, September 1977, pages 600--610. 
exp1 = np.exp(1) exp2 = np.exp(2) A = np.array([ [1, 0], [0, 2], ], dtype=float) desired = np.array([ [exp1, 0], [0, exp2], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_2(self): # This matrix is symmetric. # The calculation of the matrix exponential is straightforward. A = np.array([ [1, 3], [3, 2], ], dtype=float) desired = np.array([ [39.322809708033859, 46.166301438885753], [46.166301438885768, 54.711576854329110], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_3(self): # This example is due to Laub. # This matrix is ill-suited for the Taylor series approach. # As powers of A are computed, the entries blow up too quickly. exp1 = np.exp(1) exp39 = np.exp(39) A = np.array([ [0, 1], [-39, -40], ], dtype=float) desired = np.array([ [ 39/(38*exp1) - 1/(38*exp39), -np.expm1(-38) / (38*exp1)], [ 39*np.expm1(-38) / (38*exp1), -1/(38*exp1) + 39/(38*exp39)], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_4(self): # This example is due to Moler and Van Loan. # The example will cause problems for the series summation approach, # as well as for diagonal Pade approximations. A = np.array([ [-49, 24], [-64, 31], ], dtype=float) U = np.array([[3, 1], [4, 2]], dtype=float) V = np.array([[1, -1/2], [-2, 3/2]], dtype=float) w = np.array([-17, -1], dtype=float) desired = np.dot(U * np.exp(w), V) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_5(self): # This example is due to Moler and Van Loan. # This matrix is strictly upper triangular # All powers of A are zero beyond some (low) limit. # This example will cause problems for Pade approximations. A = np.array([ [0, 6, 0, 0], [0, 0, 6, 0], [0, 0, 0, 6], [0, 0, 0, 0], ], dtype=float) desired = np.array([ [1, 6, 18, 36], [0, 1, 6, 18], [0, 0, 1, 6], [0, 0, 0, 1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_6(self): # This example is due to Moler and Van Loan. 
# This matrix does not have a complete set of eigenvectors. # That means the eigenvector approach will fail. exp1 = np.exp(1) A = np.array([ [1, 1], [0, 1], ], dtype=float) desired = np.array([ [exp1, exp1], [0, exp1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_7(self): # This example is due to Moler and Van Loan. # This matrix is very close to example 5. # Mathematically, it has a complete set of eigenvectors. # Numerically, however, the calculation will be suspect. exp1 = np.exp(1) eps = np.spacing(1) A = np.array([ [1 + eps, 1], [0, 1 - eps], ], dtype=float) desired = np.array([ [exp1, exp1], [0, exp1], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_8(self): # This matrix was an example in Wikipedia. exp4 = np.exp(4) exp16 = np.exp(16) A = np.array([ [21, 17, 6], [-5, -1, -6], [4, 4, 16], ], dtype=float) desired = np.array([ [13*exp16 - exp4, 13*exp16 - 5*exp4, 2*exp16 - 2*exp4], [-9*exp16 + exp4, -9*exp16 + 5*exp4, -2*exp16 + 2*exp4], [16*exp16, 16*exp16, 4*exp16], ], dtype=float) * 0.25 actual = expm(A) assert_allclose(actual, desired) def test_burkardt_9(self): # This matrix is due to the NAG Library. # It is an example for function F01ECF. A = np.array([ [1, 2, 2, 2], [3, 1, 1, 2], [3, 2, 1, 2], [3, 3, 3, 1], ], dtype=float) desired = np.array([ [740.7038, 610.8500, 542.2743, 549.1753], [731.2510, 603.5524, 535.0884, 542.2743], [823.7630, 679.4257, 603.5524, 610.8500], [998.4355, 823.7630, 731.2510, 740.7038], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_10(self): # This is Ward's example #1. # It is defective and nonderogatory. 
A = np.array([ [4, 2, 0], [1, 4, 1], [1, 1, 4], ], dtype=float) assert_allclose(sorted(scipy.linalg.eigvals(A)), (3, 3, 6)) desired = np.array([ [147.8666224463699, 183.7651386463682, 71.79703239999647], [127.7810855231823, 183.7651386463682, 91.88256932318415], [127.7810855231824, 163.6796017231806, 111.9681062463718], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_11(self): # This is Ward's example #2. # It is a symmetric matrix. A = np.array([ [29.87942128909879, 0.7815750847907159, -2.289519314033932], [0.7815750847907159, 25.72656945571064, 8.680737820540137], [-2.289519314033932, 8.680737820540137, 34.39400925519054], ], dtype=float) assert_allclose(scipy.linalg.eigvalsh(A), (20, 30, 40)) desired = np.array([ [ 5.496313853692378E+15, -1.823188097200898E+16, -3.047577080858001E+16], [ -1.823188097200899E+16, 6.060522870222108E+16, 1.012918429302482E+17], [ -3.047577080858001E+16, 1.012918429302482E+17, 1.692944112408493E+17], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_12(self): # This is Ward's example #3. # Ward's algorithm has difficulty estimating the accuracy # of its results. A = np.array([ [-131, 19, 18], [-390, 56, 54], [-387, 57, 52], ], dtype=float) assert_allclose(sorted(scipy.linalg.eigvals(A)), (-20, -2, -1)) desired = np.array([ [-1.509644158793135, 0.3678794391096522, 0.1353352811751005], [-5.632570799891469, 1.471517758499875, 0.4060058435250609], [-4.934938326088363, 1.103638317328798, 0.5413411267617766], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) def test_burkardt_13(self): # This is Ward's example #4. # This is a version of the Forsythe matrix. # The eigenvector problem is badly conditioned. # Ward's algorithm has difficulty esimating the accuracy # of its results for this problem. # # Check the construction of one instance of this family of matrices. 
A4_actual = _burkardt_13_power(4, 1) A4_desired = [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [1e-4, 0, 0, 0]] assert_allclose(A4_actual, A4_desired) # Check the expm for a few instances. for n in (2, 3, 4, 10): # Approximate expm using Taylor series. # This works well for this matrix family # because each matrix in the summation, # even before dividing by the factorial, # is entrywise positive with max entry 10**(-floor(p/n)*n). k = max(1, int(np.ceil(16/n))) desired = np.zeros((n, n), dtype=float) for p in range(n*k): Ap = _burkardt_13_power(n, p) assert_equal(np.min(Ap), 0) assert_allclose(np.max(Ap), np.power(10, -np.floor(p/n)*n)) desired += Ap / factorial(p) actual = expm(_burkardt_13_power(n, 1)) assert_allclose(actual, desired) def test_burkardt_14(self): # This is Moler's example. # This badly scaled matrix caused problems for MATLAB's expm(). A = np.array([ [0, 1e-8, 0], [-(2e10 + 4e8/6.), -3, 2e10], [200./3., 0, -200./3.], ], dtype=float) desired = np.array([ [0.446849468283175, 1.54044157383952e-09, 0.462811453558774], [-5743067.77947947, -0.0152830038686819, -4526542.71278401], [0.447722977849494, 1.54270484519591e-09, 0.463480648837651], ], dtype=float) actual = expm(A) assert_allclose(actual, desired) class TestOperators(TestCase): def test_product_operator(self): random.seed(1234) n = 5 k = 2 nsamples = 10 for i in range(nsamples): A = np.random.randn(n, n) B = np.random.randn(n, n) C = np.random.randn(n, n) D = np.random.randn(n, k) op = ProductOperator(A, B, C) assert_allclose(op.matmat(D), A.dot(B).dot(C).dot(D)) assert_allclose(op.T.matmat(D), (A.dot(B).dot(C)).T.dot(D)) def test_matrix_power_operator(self): random.seed(1234) n = 5 k = 2 p = 3 nsamples = 10 for i in range(nsamples): A = np.random.randn(n, n) B = np.random.randn(n, k) op = MatrixPowerOperator(A, p) assert_allclose(op.matmat(B), matrix_power(A, p).dot(B)) assert_allclose(op.T.matmat(B), matrix_power(A, p).T.dot(B)) if __name__ == "__main__": run_module_suite()
bsd-3-clause
Francis-Liu/animated-broccoli
nova/api/openstack/compute/schemas/volumes.py
17
2852
# Copyright 2014 IBM Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from nova.api.validation import parameter_types create = { 'type': 'object', 'properties': { 'volume': { 'type': 'object', 'properties': { 'volume_type': {'type': 'string'}, 'metadata': {'type': 'object'}, 'snapshot_id': {'type': 'string'}, 'size': { 'type': ['integer', 'string'], 'pattern': '^[0-9]+$', 'minimum': 1 }, 'availability_zone': {'type': 'string'}, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'additionalProperties': False, }, }, 'required': ['volume'], 'additionalProperties': False, } snapshot_create = { 'type': 'object', 'properties': { 'snapshot': { 'type': 'object', 'properties': { 'volume_id': {'type': 'string'}, 'force': parameter_types.boolean, 'display_name': {'type': 'string'}, 'display_description': {'type': 'string'}, }, 'required': ['volume_id'], 'additionalProperties': False, }, }, 'required': ['snapshot'], 'additionalProperties': False, } create_volume_attachment = { 'type': 'object', 'properties': { 'volumeAttachment': { 'type': 'object', 'properties': { 'volumeId': parameter_types.volume_id, 'device': { 'type': ['string', 'null'], # NOTE: The validation pattern from match_device() in # nova/block_device.py. 
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$' } }, 'required': ['volumeId'], 'additionalProperties': False, }, }, 'required': ['volumeAttachment'], 'additionalProperties': False, } update_volume_attachment = copy.deepcopy(create_volume_attachment) del update_volume_attachment['properties']['volumeAttachment'][ 'properties']['device']
apache-2.0
jvvto/fia-search
graphicsDisplay.py
42
28031
# graphicsDisplay.py # ------------------ # Licensing Information: You are free to use or extend these projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to UC Berkeley, including a link to http://ai.berkeley.edu. # # Attribution Information: The Pacman AI projects were developed at UC Berkeley. # The core projects and autograders were primarily created by John DeNero # (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). # Student side autograding was added by Brad Miller, Nick Hay, and # Pieter Abbeel (pabbeel@cs.berkeley.edu). from graphicsUtils import * import math, time from game import Directions ########################### # GRAPHICS DISPLAY CODE # ########################### # Most code by Dan Klein and John Denero written or rewritten for cs188, UC Berkeley. # Some code from a Pacman implementation by LiveWires, and used / modified with permission. DEFAULT_GRID_SIZE = 30.0 INFO_PANE_HEIGHT = 35 BACKGROUND_COLOR = formatColor(0,0,0) WALL_COLOR = formatColor(0.0/255.0, 51.0/255.0, 255.0/255.0) INFO_PANE_COLOR = formatColor(.4,.4,0) SCORE_COLOR = formatColor(.9, .9, .9) PACMAN_OUTLINE_WIDTH = 2 PACMAN_CAPTURE_OUTLINE_WIDTH = 4 GHOST_COLORS = [] GHOST_COLORS.append(formatColor(.9,0,0)) # Red GHOST_COLORS.append(formatColor(0,.3,.9)) # Blue GHOST_COLORS.append(formatColor(.98,.41,.07)) # Orange GHOST_COLORS.append(formatColor(.1,.75,.7)) # Green GHOST_COLORS.append(formatColor(1.0,0.6,0.0)) # Yellow GHOST_COLORS.append(formatColor(.4,0.13,0.91)) # Purple TEAM_COLORS = GHOST_COLORS[:2] GHOST_SHAPE = [ ( 0, 0.3 ), ( 0.25, 0.75 ), ( 0.5, 0.3 ), ( 0.75, 0.75 ), ( 0.75, -0.5 ), ( 0.5, -0.75 ), (-0.5, -0.75 ), (-0.75, -0.5 ), (-0.75, 0.75 ), (-0.5, 0.3 ), (-0.25, 0.75 ) ] GHOST_SIZE = 0.65 SCARED_COLOR = formatColor(1,1,1) GHOST_VEC_COLORS = map(colorToVector, GHOST_COLORS) PACMAN_COLOR = formatColor(255.0/255.0,255.0/255.0,61.0/255) 
PACMAN_SCALE = 0.5 #pacman_speed = 0.25 # Food FOOD_COLOR = formatColor(1,1,1) FOOD_SIZE = 0.1 # Laser LASER_COLOR = formatColor(1,0,0) LASER_SIZE = 0.02 # Capsule graphics CAPSULE_COLOR = formatColor(1,1,1) CAPSULE_SIZE = 0.25 # Drawing walls WALL_RADIUS = 0.15 class InfoPane: def __init__(self, layout, gridSize): self.gridSize = gridSize self.width = (layout.width) * gridSize self.base = (layout.height + 1) * gridSize self.height = INFO_PANE_HEIGHT self.fontSize = 24 self.textColor = PACMAN_COLOR self.drawPane() def toScreen(self, pos, y = None): """ Translates a point relative from the bottom left of the info pane. """ if y == None: x,y = pos else: x = pos x = self.gridSize + x # Margin y = self.base + y return x,y def drawPane(self): self.scoreText = text( self.toScreen(0, 0 ), self.textColor, "SCORE: 0", "Times", self.fontSize, "bold") def initializeGhostDistances(self, distances): self.ghostDistanceText = [] size = 20 if self.width < 240: size = 12 if self.width < 160: size = 10 for i, d in enumerate(distances): t = text( self.toScreen(self.width/2 + self.width/8 * i, 0), GHOST_COLORS[i+1], d, "Times", size, "bold") self.ghostDistanceText.append(t) def updateScore(self, score): changeText(self.scoreText, "SCORE: % 4d" % score) def setTeam(self, isBlue): text = "RED TEAM" if isBlue: text = "BLUE TEAM" self.teamText = text( self.toScreen(300, 0 ), self.textColor, text, "Times", self.fontSize, "bold") def updateGhostDistances(self, distances): if len(distances) == 0: return if 'ghostDistanceText' not in dir(self): self.initializeGhostDistances(distances) else: for i, d in enumerate(distances): changeText(self.ghostDistanceText[i], d) def drawGhost(self): pass def drawPacman(self): pass def drawWarning(self): pass def clearIcon(self): pass def updateMessage(self, message): pass def clearMessage(self): pass class PacmanGraphics: def __init__(self, zoom=1.0, frameTime=0.0, capture=False): self.have_window = 0 self.currentGhostImages = {} self.pacmanImage = None 
self.zoom = zoom self.gridSize = DEFAULT_GRID_SIZE * zoom self.capture = capture self.frameTime = frameTime def checkNullDisplay(self): return False def initialize(self, state, isBlue = False): self.isBlue = isBlue self.startGraphics(state) # self.drawDistributions(state) self.distributionImages = None # Initialized lazily self.drawStaticObjects(state) self.drawAgentObjects(state) # Information self.previousState = state def startGraphics(self, state): self.layout = state.layout layout = self.layout self.width = layout.width self.height = layout.height self.make_window(self.width, self.height) self.infoPane = InfoPane(layout, self.gridSize) self.currentState = layout def drawDistributions(self, state): walls = state.layout.walls dist = [] for x in range(walls.width): distx = [] dist.append(distx) for y in range(walls.height): ( screen_x, screen_y ) = self.to_screen( (x, y) ) block = square( (screen_x, screen_y), 0.5 * self.gridSize, color = BACKGROUND_COLOR, filled = 1, behind=2) distx.append(block) self.distributionImages = dist def drawStaticObjects(self, state): layout = self.layout self.drawWalls(layout.walls) self.food = self.drawFood(layout.food) self.capsules = self.drawCapsules(layout.capsules) refresh() def drawAgentObjects(self, state): self.agentImages = [] # (agentState, image) for index, agent in enumerate(state.agentStates): if agent.isPacman: image = self.drawPacman(agent, index) self.agentImages.append( (agent, image) ) else: image = self.drawGhost(agent, index) self.agentImages.append( (agent, image) ) refresh() def swapImages(self, agentIndex, newState): """ Changes an image from a ghost to a pacman or vis versa (for capture) """ prevState, prevImage = self.agentImages[agentIndex] for item in prevImage: remove_from_screen(item) if newState.isPacman: image = self.drawPacman(newState, agentIndex) self.agentImages[agentIndex] = (newState, image ) else: image = self.drawGhost(newState, agentIndex) self.agentImages[agentIndex] = (newState, image ) 
refresh() def update(self, newState): agentIndex = newState._agentMoved agentState = newState.agentStates[agentIndex] if self.agentImages[agentIndex][0].isPacman != agentState.isPacman: self.swapImages(agentIndex, agentState) prevState, prevImage = self.agentImages[agentIndex] if agentState.isPacman: self.animatePacman(agentState, prevState, prevImage) else: self.moveGhost(agentState, agentIndex, prevState, prevImage) self.agentImages[agentIndex] = (agentState, prevImage) if newState._foodEaten != None: self.removeFood(newState._foodEaten, self.food) if newState._capsuleEaten != None: self.removeCapsule(newState._capsuleEaten, self.capsules) self.infoPane.updateScore(newState.score) if 'ghostDistances' in dir(newState): self.infoPane.updateGhostDistances(newState.ghostDistances) def make_window(self, width, height): grid_width = (width-1) * self.gridSize grid_height = (height-1) * self.gridSize screen_width = 2*self.gridSize + grid_width screen_height = 2*self.gridSize + grid_height + INFO_PANE_HEIGHT begin_graphics(screen_width, screen_height, BACKGROUND_COLOR, "CS188 Pacman") def drawPacman(self, pacman, index): position = self.getPosition(pacman) screen_point = self.to_screen(position) endpoints = self.getEndpoints(self.getDirection(pacman)) width = PACMAN_OUTLINE_WIDTH outlineColor = PACMAN_COLOR fillColor = PACMAN_COLOR if self.capture: outlineColor = TEAM_COLORS[index % 2] fillColor = GHOST_COLORS[index] width = PACMAN_CAPTURE_OUTLINE_WIDTH return [circle(screen_point, PACMAN_SCALE * self.gridSize, fillColor = fillColor, outlineColor = outlineColor, endpoints = endpoints, width = width)] def getEndpoints(self, direction, position=(0,0)): x, y = position pos = x - int(x) + y - int(y) width = 30 + 80 * math.sin(math.pi* pos) delta = width / 2 if (direction == 'West'): endpoints = (180+delta, 180-delta) elif (direction == 'North'): endpoints = (90+delta, 90-delta) elif (direction == 'South'): endpoints = (270+delta, 270-delta) else: endpoints = (0+delta, 
0-delta) return endpoints def movePacman(self, position, direction, image): screenPosition = self.to_screen(position) endpoints = self.getEndpoints( direction, position ) r = PACMAN_SCALE * self.gridSize moveCircle(image[0], screenPosition, r, endpoints) refresh() def animatePacman(self, pacman, prevPacman, image): if self.frameTime < 0: print 'Press any key to step forward, "q" to play' keys = wait_for_keys() if 'q' in keys: self.frameTime = 0.1 if self.frameTime > 0.01 or self.frameTime < 0: start = time.time() fx, fy = self.getPosition(prevPacman) px, py = self.getPosition(pacman) frames = 4.0 for i in range(1,int(frames) + 1): pos = px*i/frames + fx*(frames-i)/frames, py*i/frames + fy*(frames-i)/frames self.movePacman(pos, self.getDirection(pacman), image) refresh() sleep(abs(self.frameTime) / frames) else: self.movePacman(self.getPosition(pacman), self.getDirection(pacman), image) refresh() def getGhostColor(self, ghost, ghostIndex): if ghost.scaredTimer > 0: return SCARED_COLOR else: return GHOST_COLORS[ghostIndex] def drawGhost(self, ghost, agentIndex): pos = self.getPosition(ghost) dir = self.getDirection(ghost) (screen_x, screen_y) = (self.to_screen(pos) ) coords = [] for (x, y) in GHOST_SHAPE: coords.append((x*self.gridSize*GHOST_SIZE + screen_x, y*self.gridSize*GHOST_SIZE + screen_y)) colour = self.getGhostColor(ghost, agentIndex) body = polygon(coords, colour, filled = 1) WHITE = formatColor(1.0, 1.0, 1.0) BLACK = formatColor(0.0, 0.0, 0.0) dx = 0 dy = 0 if dir == 'North': dy = -0.2 if dir == 'South': dy = 0.2 if dir == 'East': dx = 0.2 if dir == 'West': dx = -0.2 leftEye = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE) rightEye = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2, WHITE, WHITE) leftPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), 
screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK) rightPupil = circle((screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08, BLACK, BLACK) ghostImageParts = [] ghostImageParts.append(body) ghostImageParts.append(leftEye) ghostImageParts.append(rightEye) ghostImageParts.append(leftPupil) ghostImageParts.append(rightPupil) return ghostImageParts def moveEyes(self, pos, dir, eyes): (screen_x, screen_y) = (self.to_screen(pos) ) dx = 0 dy = 0 if dir == 'North': dy = -0.2 if dir == 'South': dy = 0.2 if dir == 'East': dx = 0.2 if dir == 'West': dx = -0.2 moveCircle(eyes[0],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2) moveCircle(eyes[1],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx/1.5), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy/1.5)), self.gridSize*GHOST_SIZE*0.2) moveCircle(eyes[2],(screen_x+self.gridSize*GHOST_SIZE*(-0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08) moveCircle(eyes[3],(screen_x+self.gridSize*GHOST_SIZE*(0.3+dx), screen_y-self.gridSize*GHOST_SIZE*(0.3-dy)), self.gridSize*GHOST_SIZE*0.08) def moveGhost(self, ghost, ghostIndex, prevGhost, ghostImageParts): old_x, old_y = self.to_screen(self.getPosition(prevGhost)) new_x, new_y = self.to_screen(self.getPosition(ghost)) delta = new_x - old_x, new_y - old_y for ghostImagePart in ghostImageParts: move_by(ghostImagePart, delta) refresh() if ghost.scaredTimer > 0: color = SCARED_COLOR else: color = GHOST_COLORS[ghostIndex] edit(ghostImageParts[0], ('fill', color), ('outline', color)) self.moveEyes(self.getPosition(ghost), self.getDirection(ghost), ghostImageParts[-4:]) refresh() def getPosition(self, agentState): if agentState.configuration == None: return (-1000, -1000) return agentState.getPosition() def getDirection(self, agentState): if agentState.configuration == None: return 
Directions.STOP return agentState.configuration.getDirection() def finish(self): end_graphics() def to_screen(self, point): ( x, y ) = point #y = self.height - y x = (x + 1)*self.gridSize y = (self.height - y)*self.gridSize return ( x, y ) # Fixes some TK issue with off-center circles def to_screen2(self, point): ( x, y ) = point #y = self.height - y x = (x + 1)*self.gridSize y = (self.height - y)*self.gridSize return ( x, y ) def drawWalls(self, wallMatrix): wallColor = WALL_COLOR for xNum, x in enumerate(wallMatrix): if self.capture and (xNum * 2) < wallMatrix.width: wallColor = TEAM_COLORS[0] if self.capture and (xNum * 2) >= wallMatrix.width: wallColor = TEAM_COLORS[1] for yNum, cell in enumerate(x): if cell: # There's a wall here pos = (xNum, yNum) screen = self.to_screen(pos) screen2 = self.to_screen2(pos) # draw each quadrant of the square based on adjacent walls wIsWall = self.isWall(xNum-1, yNum, wallMatrix) eIsWall = self.isWall(xNum+1, yNum, wallMatrix) nIsWall = self.isWall(xNum, yNum+1, wallMatrix) sIsWall = self.isWall(xNum, yNum-1, wallMatrix) nwIsWall = self.isWall(xNum-1, yNum+1, wallMatrix) swIsWall = self.isWall(xNum-1, yNum-1, wallMatrix) neIsWall = self.isWall(xNum+1, yNum+1, wallMatrix) seIsWall = self.isWall(xNum+1, yNum-1, wallMatrix) # NE quadrant if (not nIsWall) and (not eIsWall): # inner circle circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (0,91), 'arc') if (nIsWall) and (not eIsWall): # vertical line line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor) if (not nIsWall) and (eIsWall): # horizontal line line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) if (nIsWall) and (eIsWall) and (not neIsWall): # outer circle circle(add(screen2, (self.gridSize*2*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (180,271), 
'arc') line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(-0.5))), wallColor) # NW quadrant if (not nIsWall) and (not wIsWall): # inner circle circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (90,181), 'arc') if (nIsWall) and (not wIsWall): # vertical line line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5)-1)), wallColor) if (not nIsWall) and (wIsWall): # horizontal line line(add(screen, (0, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(-1)*WALL_RADIUS)), wallColor) if (nIsWall) and (wIsWall) and (not nwIsWall): # outer circle circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (270,361), 'arc') line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(-1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(-1)*WALL_RADIUS)), wallColor) line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-2)*WALL_RADIUS+1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(-0.5))), wallColor) # SE quadrant if (not sIsWall) and (not eIsWall): # inner circle circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (270,361), 'arc') if (sIsWall) and (not eIsWall): # vertical line line(add(screen, (self.gridSize*WALL_RADIUS, 0)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor) if (not sIsWall) and (eIsWall): # horizontal line line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5+1, self.gridSize*(1)*WALL_RADIUS)), wallColor) if (sIsWall) and (eIsWall) and (not seIsWall): # outer circle circle(add(screen2, 
(self.gridSize*2*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (90,181), 'arc') line(add(screen, (self.gridSize*2*WALL_RADIUS-1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*0.5, self.gridSize*(1)*WALL_RADIUS)), wallColor) line(add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*WALL_RADIUS, self.gridSize*(0.5))), wallColor) # SW quadrant if (not sIsWall) and (not wIsWall): # inner circle circle(screen2, WALL_RADIUS * self.gridSize, wallColor, wallColor, (180,271), 'arc') if (sIsWall) and (not wIsWall): # vertical line line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, 0)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5)+1)), wallColor) if (not sIsWall) and (wIsWall): # horizontal line line(add(screen, (0, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5)-1, self.gridSize*(1)*WALL_RADIUS)), wallColor) if (sIsWall) and (wIsWall) and (not swIsWall): # outer circle circle(add(screen2, (self.gridSize*(-2)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS)), WALL_RADIUS * self.gridSize-1, wallColor, wallColor, (0,91), 'arc') line(add(screen, (self.gridSize*(-2)*WALL_RADIUS+1, self.gridSize*(1)*WALL_RADIUS)), add(screen, (self.gridSize*(-0.5), self.gridSize*(1)*WALL_RADIUS)), wallColor) line(add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(2)*WALL_RADIUS-1)), add(screen, (self.gridSize*(-1)*WALL_RADIUS, self.gridSize*(0.5))), wallColor) def isWall(self, x, y, walls): if x < 0 or y < 0: return False if x >= walls.width or y >= walls.height: return False return walls[x][y] def drawFood(self, foodMatrix ): foodImages = [] color = FOOD_COLOR for xNum, x in enumerate(foodMatrix): if self.capture and (xNum * 2) <= foodMatrix.width: color = TEAM_COLORS[0] if self.capture and (xNum * 2) > foodMatrix.width: color = TEAM_COLORS[1] imageRow = [] foodImages.append(imageRow) for yNum, cell in enumerate(x): if cell: # There's food 
here screen = self.to_screen((xNum, yNum )) dot = circle( screen, FOOD_SIZE * self.gridSize, outlineColor = color, fillColor = color, width = 1) imageRow.append(dot) else: imageRow.append(None) return foodImages def drawCapsules(self, capsules ): capsuleImages = {} for capsule in capsules: ( screen_x, screen_y ) = self.to_screen(capsule) dot = circle( (screen_x, screen_y), CAPSULE_SIZE * self.gridSize, outlineColor = CAPSULE_COLOR, fillColor = CAPSULE_COLOR, width = 1) capsuleImages[capsule] = dot return capsuleImages def removeFood(self, cell, foodImages ): x, y = cell remove_from_screen(foodImages[x][y]) def removeCapsule(self, cell, capsuleImages ): x, y = cell remove_from_screen(capsuleImages[(x, y)]) def drawExpandedCells(self, cells): """ Draws an overlay of expanded grid positions for search agents """ n = float(len(cells)) baseColor = [1.0, 0.0, 0.0] self.clearExpandedCells() self.expandedCells = [] for k, cell in enumerate(cells): screenPos = self.to_screen( cell) cellColor = formatColor(*[(n-k) * c * .5 / n + .25 for c in baseColor]) block = square(screenPos, 0.5 * self.gridSize, color = cellColor, filled = 1, behind=2) self.expandedCells.append(block) if self.frameTime < 0: refresh() def clearExpandedCells(self): if 'expandedCells' in dir(self) and len(self.expandedCells) > 0: for cell in self.expandedCells: remove_from_screen(cell) def updateDistributions(self, distributions): "Draws an agent's belief distributions" # copy all distributions so we don't change their state distributions = map(lambda x: x.copy(), distributions) if self.distributionImages == None: self.drawDistributions(self.previousState) for x in range(len(self.distributionImages)): for y in range(len(self.distributionImages[0])): image = self.distributionImages[x][y] weights = [dist[ (x,y) ] for dist in distributions] if sum(weights) != 0: pass # Fog of war color = [0.0,0.0,0.0] colors = GHOST_VEC_COLORS[1:] # With Pacman if self.capture: colors = GHOST_VEC_COLORS for weight, gcolor in 
zip(weights, colors): color = [min(1.0, c + 0.95 * g * weight ** .3) for c,g in zip(color, gcolor)] changeColor(image, formatColor(*color)) refresh() class FirstPersonPacmanGraphics(PacmanGraphics): def __init__(self, zoom = 1.0, showGhosts = True, capture = False, frameTime=0): PacmanGraphics.__init__(self, zoom, frameTime=frameTime) self.showGhosts = showGhosts self.capture = capture def initialize(self, state, isBlue = False): self.isBlue = isBlue PacmanGraphics.startGraphics(self, state) # Initialize distribution images walls = state.layout.walls dist = [] self.layout = state.layout # Draw the rest self.distributionImages = None # initialize lazily self.drawStaticObjects(state) self.drawAgentObjects(state) # Information self.previousState = state def lookAhead(self, config, state): if config.getDirection() == 'Stop': return else: pass # Draw relevant ghosts allGhosts = state.getGhostStates() visibleGhosts = state.getVisibleGhosts() for i, ghost in enumerate(allGhosts): if ghost in visibleGhosts: self.drawGhost(ghost, i) else: self.currentGhostImages[i] = None def getGhostColor(self, ghost, ghostIndex): return GHOST_COLORS[ghostIndex] def getPosition(self, ghostState): if not self.showGhosts and not ghostState.isPacman and ghostState.getPosition()[1] > 1: return (-1000, -1000) else: return PacmanGraphics.getPosition(self, ghostState) def add(x, y): return (x[0] + y[0], x[1] + y[1]) # Saving graphical output # ----------------------- # Note: to make an animated gif from this postscript output, try the command: # convert -delay 7 -loop 1 -compress lzw -layers optimize frame* out.gif # convert is part of imagemagick (freeware) SAVE_POSTSCRIPT = False POSTSCRIPT_OUTPUT_DIR = 'frames' FRAME_NUMBER = 0 import os def saveFrame(): "Saves the current graphical output as a postscript file" global SAVE_POSTSCRIPT, FRAME_NUMBER, POSTSCRIPT_OUTPUT_DIR if not SAVE_POSTSCRIPT: return if not os.path.exists(POSTSCRIPT_OUTPUT_DIR): os.mkdir(POSTSCRIPT_OUTPUT_DIR) name = 
os.path.join(POSTSCRIPT_OUTPUT_DIR, 'frame_%08d.ps' % FRAME_NUMBER) FRAME_NUMBER += 1 writePostscript(name) # writes the current canvas
mit
indashnet/InDashNet.Open.UN2000
lichee/linux-3.4/arch/ia64/scripts/unwcheck.py
13143
1714
#!/usr/bin/python3
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
# Ported to Python 3 (print(), int instead of long, explicit floor
# division) and restructured behind a main() guard so the parsing
# helpers are importable/testable.
#
import os
import re
import sys

# One line per function header in "readelf -u" output, e.g.
# "<name>: [0x4000-0x4040]"; groups: name, start address, end address.
start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Region-length annotation within a record, e.g. "rlen=24".
rlen_pattern = re.compile(r".*rlen=([0-9]+)")


def check_func(func, slots, rlen_sum, start=0, end=0):
    """Verify one function's unwind info.

    Print an error and return 1 if the expected slot count *slots* does
    not match the accumulated region length *rlen_sum*; return 0 when
    they agree.  *start*/*end* are only used to synthesize a name for a
    function without a symbol (falsy *func*).
    """
    if slots != rlen_sum:
        if not func:
            func = "[%#x-%#x]" % (start, end)
        print("ERROR: %s: %lu slots, total region length = %lu"
              % (func, slots, rlen_sum))
        return 1
    return 0


def main(argv):
    """Run "readelf -u" on argv[1] and cross-check every function.

    Returns a shell exit status: 0 on success, 1 if mismatches were
    found, 2 on usage error.
    """
    if len(argv) != 2:
        print("Usage: %s FILE" % argv[0])
        return 2

    readelf = os.getenv("READELF", "readelf")

    num_funcs = 0
    num_errors = 0
    func = False
    start = end = 0
    slots = 0
    rlen_sum = 0
    for line in os.popen("%s -u %s" % (readelf, argv[1])):
        m = start_pattern.match(line)
        if m:
            # A new function header: validate the previous function first.
            num_errors += check_func(func, slots, rlen_sum, start, end)
            func = m.group(1)
            start = int(m.group(2), 16)
            end = int(m.group(3), 16)
            # ia64 instruction bundles hold 3 slots per 16 bytes.
            slots = 3 * (end - start) // 16
            rlen_sum = 0
            num_funcs += 1
        else:
            m = rlen_pattern.match(line)
            if m:
                rlen_sum += int(m.group(1))
    # The loop only checks a function when the NEXT header appears, so
    # the final function must be checked after the loop.
    num_errors += check_func(func, slots, rlen_sum, start, end)

    if num_errors == 0:
        print("No errors detected in %u functions." % num_funcs)
        return 0
    err = "errors" if num_errors > 1 else "error"
    print("%u %s detected in %u functions." % (num_errors, err, num_funcs))
    return 1


if __name__ == "__main__":
    sys.exit(main(sys.argv))
apache-2.0
schroeji/i3pystatus
i3pystatus/taskwarrior.py
10
3751
from i3pystatus import IntervalModule
from json import loads
import subprocess


class Taskwarrior(IntervalModule):
    """
    Check Taskwarrior for pending tasks
    Requires `json`

    .. rubric:: Available formatters (uses :ref:`formatp`)

    * `{ready}` — contains number of tasks returned by `ready_filter`
    * `{urgent}` — contains number of tasks returned by `urgent_filter`
    * `{next}` — contains the description of next task
    * `{project}` — contains the projects the next task belongs to

    .. rubric:: Available callbacks

    * ``get_next_task`` — Display the next most urgent task.
    * ``get_prev_task`` — Display the previous most urgent task.
    * ``reset_next_task`` — Display the most urgent task, resetting any \
switching by other callbacks.
    """

    format = 'Task: {next}'
    ready_filter = '+READY'
    urgent_filter = '+TODAY'
    enable_mark_done = False
    color_urgent = '#FF0000'
    color_ready = '#78EAF2'

    # Task state refreshed by run(); callbacks below only read/cycle it.
    ready_tasks = []
    urgent_tasks = []
    current_tasks = []
    next_id = 0
    next_task = None

    on_upscroll = "get_prev_task"
    on_downscroll = "get_next_task"
    on_rightclick = 'mark_task_as_done'
    on_leftclick = "reset_next_task"

    settings = (
        ('format', 'format string'),
        ('ready_filter', 'Filters to get ready tasks example: `+READY`'),
        ('urgent_filter', 'Filters to get urgent tasks example: `+TODAY`'),
        ('enable_mark_done', 'Enable right click mark task as done'),
        ('color_urgent', '#FF0000'),
        ('color_ready', '#78EAF2')
    )

    def reset_next_task(self):
        """Callback: jump back to the most urgent task."""
        self.next_id = 0
        # Guard: a click may arrive before the first run() populated the
        # list (or when no tasks are pending) — previously IndexError.
        if self.current_tasks:
            self.next_task = self.current_tasks[self.next_id]

    def get_next_task(self):
        """Callback: cycle forward through the current task list."""
        if not self.current_tasks:
            return  # avoid modulo-by-zero on an empty list
        self.next_id = (self.next_id + 1) % len(self.current_tasks)
        self.next_task = self.current_tasks[self.next_id]

    def get_prev_task(self):
        """Callback: cycle backward through the current task list."""
        if not self.current_tasks:
            return  # avoid modulo-by-zero on an empty list
        self.next_id = (self.next_id - 1) % len(self.current_tasks)
        self.next_task = self.current_tasks[self.next_id]

    def mark_task_as_done(self):
        """Callback: mark the displayed task done via `task <id> done`."""
        if self.enable_mark_done and self.next_task is not None:
            subprocess.check_output(['task', str(self.next_task['id']),
                                     'done'])
            # current_tasks is only rebuilt on the next run(), so simply
            # advance the display for now.
            self.get_next_task()

    def run(self):
        """Poll `task ... export` and build the status-bar output."""
        try:
            urgent_params = ['task'] + self.urgent_filter.split(' ') + ['export']
            urgent_tasks_json = subprocess.check_output(urgent_params)
            self.urgent_tasks = loads(urgent_tasks_json.decode("utf-8"))
            self.urgent_tasks = sorted(self.urgent_tasks,
                                       key=lambda x: x['urgency'],
                                       reverse=True)

            ready_params = ['task'] + self.ready_filter.split(' ') + ['export']
            ready_tasks = subprocess.check_output(ready_params)
            self.ready_tasks = loads(ready_tasks.decode("utf-8"))
            self.ready_tasks = sorted(self.ready_tasks,
                                      key=lambda x: x['urgency'],
                                      reverse=True)

            # Urgent tasks take precedence when any exist.
            self.current_tasks = (self.urgent_tasks if len(self.urgent_tasks) > 0
                                  else self.ready_tasks)
            if self.next_id < len(self.current_tasks):
                self.next_task = self.current_tasks[self.next_id]
            else:
                # The list shrank below the cursor: restart at the top
                # instead of keeping a stale next_task on display.
                self.next_id = 0
                self.next_task = (self.current_tasks[0]
                                  if self.current_tasks else None)
        except ValueError:
            self.logger.exception('Decoding JSON has failed')
            raise

        format_values = dict(urgent=len(self.urgent_tasks),
                             ready=len(self.ready_tasks),
                             next='')

        if self.next_task is not None:
            format_values['next'] = self.next_task['description']
            format_values['project'] = self.next_task.get('project', '')

        self.output = {
            'full_text': self.format.format(**format_values),
            'color': self.color_urgent if len(self.urgent_tasks) > 0
                     else self.color_ready
        }
mit
aleida/django
tests/regressiontests/localflavor/id/tests.py
13
7228
# Regression tests for the Indonesian (id) localflavor form fields and
# widgets: province / license-plate-prefix selects, phone numbers, post
# codes, NIK/KTP identity numbers and vehicle license plates.
from __future__ import unicode_literals

import warnings

from django.contrib.localflavor.id.forms import (IDPhoneNumberField,
    IDPostCodeField, IDNationalIdentityNumberField, IDLicensePlateField,
    IDProvinceSelect, IDLicensePlatePrefixSelect)

from django.test import SimpleTestCase


class IDLocalFlavorTests(SimpleTestCase):
    """Exercise the Indonesian localflavor fields and select widgets."""

    def setUp(self):
        # Filter the RuntimeWarning raised by the id_choices module
        # (presumably a deprecation notice — confirm against that module)
        # so it does not pollute the test run; the original warning state
        # is restored in tearDown().
        self.save_warnings_state()
        warnings.filterwarnings(
            "ignore",
            category=RuntimeWarning,
            module='django.contrib.localflavor.id.id_choices'
        )

    def tearDown(self):
        self.restore_warnings_state()

    def test_IDProvinceSelect(self):
        # Rendering with 'LPG' selected must mark only Lampung as selected.
        f = IDProvinceSelect()
        out = '''<select name="provinces">
<option value="ACE">Aceh</option>
<option value="BLI">Bali</option>
<option value="BTN">Banten</option>
<option value="BKL">Bengkulu</option>
<option value="DIY">Yogyakarta</option>
<option value="JKT">Jakarta</option>
<option value="GOR">Gorontalo</option>
<option value="JMB">Jambi</option>
<option value="JBR">Jawa Barat</option>
<option value="JTG">Jawa Tengah</option>
<option value="JTM">Jawa Timur</option>
<option value="KBR">Kalimantan Barat</option>
<option value="KSL">Kalimantan Selatan</option>
<option value="KTG">Kalimantan Tengah</option>
<option value="KTM">Kalimantan Timur</option>
<option value="BBL">Kepulauan Bangka-Belitung</option>
<option value="KRI">Kepulauan Riau</option>
<option value="LPG" selected="selected">Lampung</option>
<option value="MLK">Maluku</option>
<option value="MUT">Maluku Utara</option>
<option value="NTB">Nusa Tenggara Barat</option>
<option value="NTT">Nusa Tenggara Timur</option>
<option value="PPA">Papua</option>
<option value="PPB">Papua Barat</option>
<option value="RIU">Riau</option>
<option value="SLB">Sulawesi Barat</option>
<option value="SLS">Sulawesi Selatan</option>
<option value="SLT">Sulawesi Tengah</option>
<option value="SLR">Sulawesi Tenggara</option>
<option value="SLU">Sulawesi Utara</option>
<option value="SMB">Sumatera Barat</option>
<option value="SMS">Sumatera Selatan</option>
<option value="SMU">Sumatera Utara</option>
</select>'''
        self.assertHTMLEqual(f.render('provinces', 'LPG'), out)

    def test_IDLicensePlatePrefixSelect(self):
        # Rendering with 'BE' selected must mark only Lampung as selected.
        f = IDLicensePlatePrefixSelect()
        out = '''<select name="codes">
<option value="A">Banten</option>
<option value="AA">Magelang</option>
<option value="AB">Yogyakarta</option>
<option value="AD">Surakarta - Solo</option>
<option value="AE">Madiun</option>
<option value="AG">Kediri</option>
<option value="B">Jakarta</option>
<option value="BA">Sumatera Barat</option>
<option value="BB">Tapanuli</option>
<option value="BD">Bengkulu</option>
<option value="BE" selected="selected">Lampung</option>
<option value="BG">Sumatera Selatan</option>
<option value="BH">Jambi</option>
<option value="BK">Sumatera Utara</option>
<option value="BL">Nanggroe Aceh Darussalam</option>
<option value="BM">Riau</option>
<option value="BN">Kepulauan Bangka Belitung</option>
<option value="BP">Kepulauan Riau</option>
<option value="CC">Corps Consulate</option>
<option value="CD">Corps Diplomatic</option>
<option value="D">Bandung</option>
<option value="DA">Kalimantan Selatan</option>
<option value="DB">Sulawesi Utara Daratan</option>
<option value="DC">Sulawesi Barat</option>
<option value="DD">Sulawesi Selatan</option>
<option value="DE">Maluku</option>
<option value="DG">Maluku Utara</option>
<option value="DH">NTT - Timor</option>
<option value="DK">Bali</option>
<option value="DL">Sulawesi Utara Kepulauan</option>
<option value="DM">Gorontalo</option>
<option value="DN">Sulawesi Tengah</option>
<option value="DR">NTB - Lombok</option>
<option value="DS">Papua dan Papua Barat</option>
<option value="DT">Sulawesi Tenggara</option>
<option value="E">Cirebon</option>
<option value="EA">NTB - Sumbawa</option>
<option value="EB">NTT - Flores</option>
<option value="ED">NTT - Sumba</option>
<option value="F">Bogor</option>
<option value="G">Pekalongan</option>
<option value="H">Semarang</option>
<option value="K">Pati</option>
<option value="KB">Kalimantan Barat</option>
<option value="KH">Kalimantan Tengah</option>
<option value="KT">Kalimantan Timur</option>
<option value="L">Surabaya</option>
<option value="M">Madura</option>
<option value="N">Malang</option>
<option value="P">Jember</option>
<option value="R">Banyumas</option>
<option value="RI">Federal Government</option>
<option value="S">Bojonegoro</option>
<option value="T">Purwakarta</option>
<option value="W">Sidoarjo</option>
<option value="Z">Garut</option>
</select>'''
        self.assertHTMLEqual(f.render('codes', 'BE'), out)

    def test_IDPhoneNumberField(self):
        # Valid inputs pass through unchanged; others raise the message.
        error_invalid = ['Enter a valid phone number']
        valid = {
            '0812-3456789': '0812-3456789',
            '081234567890': '081234567890',
            '021 345 6789': '021 345 6789',
            '0213456789': '0213456789',
            '+62-21-3456789': '+62-21-3456789',
            '(021) 345 6789': '(021) 345 6789',
        }
        invalid = {
            '0123456789': error_invalid,
            '+62-021-3456789': error_invalid,
            '+62-0812-3456789': error_invalid,
            '0812345678901': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPhoneNumberField, valid, invalid)

    def test_IDPostCodeField(self):
        # Surrounding whitespace is stripped in the cleaned value.
        error_invalid = ['Enter a valid post code']
        valid = {
            '12340': '12340',
            '25412': '25412',
            ' 12340 ': '12340',
        }
        invalid = {
            '12 3 4 0': error_invalid,
            '12345': error_invalid,
            '10100': error_invalid,
            '123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDPostCodeField, valid, invalid)

    def test_IDNationalIdentityNumberField(self):
        # Cleaned NIK/KTP numbers are normalized to dotted XX.XXXX.DDMMYY.XXXX.
        error_invalid = ['Enter a valid NIK/KTP number']
        valid = {
            ' 12.3456.010178 3456 ': '12.3456.010178.3456',
            '1234560101783456': '12.3456.010178.3456',
            '12.3456.010101.3456': '12.3456.010101.3456',
        }
        invalid = {
            '12.3456.310278.3456': error_invalid,
            '00.0000.010101.0000': error_invalid,
            '1234567890123456': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDNationalIdentityNumberField, valid, invalid)

    def test_IDLicensePlateField(self):
        # Cleaned plates are upper-cased and whitespace-normalized.
        error_invalid = ['Enter a valid vehicle license plate number']
        valid = {
            ' b 1234 ab ': 'B 1234 AB',
            'B 1234 ABC': 'B 1234 ABC',
            'A 12': 'A 12',
            'DK 12345 12': 'DK 12345 12',
            'RI 10': 'RI 10',
            'CD 12 12': 'CD 12 12',
        }
        invalid = {
            'CD 10 12': error_invalid,
            'CD 1234 12': error_invalid,
            'RI 10 AB': error_invalid,
            'B 12345 01': error_invalid,
            'N 1234 12': error_invalid,
            'A 12 XYZ': error_invalid,
            'Q 1234 AB': error_invalid,
            'foo': error_invalid,
        }
        self.assertFieldOutput(IDLicensePlateField, valid, invalid)
bsd-3-clause
mat128/python-novaclient
novaclient/tests/unit/test_api_versions.py
3
14278
# Copyright 2016 Mirantis
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Unit tests for novaclient's compute API microversion machinery:
# APIVersion parsing/comparison, request-header injection, the
# @api_versions.wraps decorator and client/server version negotiation.

import mock

import novaclient
from novaclient import api_versions
from novaclient import exceptions
from novaclient.openstack.common import cliutils
from novaclient.tests.unit import utils
from novaclient.v2 import versions


class APIVersionTestCase(utils.TestCase):
    """Parsing, comparison and matching behavior of APIVersion."""

    def test_valid_version_strings(self):
        def _test_string(version, exp_major, exp_minor):
            # Helper: parse *version* and check both components.
            v = api_versions.APIVersion(version)
            self.assertEqual(v.ver_major, exp_major)
            self.assertEqual(v.ver_minor, exp_minor)

        _test_string("1.1", 1, 1)
        _test_string("2.10", 2, 10)
        _test_string("5.234", 5, 234)
        _test_string("12.5", 12, 5)
        _test_string("2.0", 2, 0)
        _test_string("2.200", 2, 200)

    def test_null_version(self):
        # The no-argument constructor yields the "null" version sentinel.
        v = api_versions.APIVersion()
        self.assertTrue(v.is_null())

    def test_invalid_version_strings(self):
        # Anything not strictly "MAJOR.MINOR" (no padding zeroes, no
        # whitespace, exactly one dot) must be rejected.
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "2")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "200")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "2.1.4")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "200.23.66.3")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "5 .3")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "5. 3")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "5.03")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "02.1")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "2.001")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, " 2.1")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.APIVersion, "2.1 ")

    def test_version_comparisons(self):
        # Rich comparisons work between versions (including null), but
        # comparing against a raw string is a TypeError.
        v1 = api_versions.APIVersion("2.0")
        v2 = api_versions.APIVersion("2.5")
        v3 = api_versions.APIVersion("5.23")
        v4 = api_versions.APIVersion("2.0")
        v_null = api_versions.APIVersion()

        self.assertTrue(v1 < v2)
        self.assertTrue(v3 > v2)
        self.assertTrue(v1 != v2)
        self.assertTrue(v1 == v4)
        self.assertTrue(v1 != v_null)
        self.assertTrue(v_null == v_null)
        self.assertRaises(TypeError, v1.__le__, "2.1")

    def test_version_matches(self):
        # matches(min, max): null bounds are open-ended; calling matches()
        # on a null version itself is an error.
        v1 = api_versions.APIVersion("2.0")
        v2 = api_versions.APIVersion("2.5")
        v3 = api_versions.APIVersion("2.45")
        v4 = api_versions.APIVersion("3.3")
        v5 = api_versions.APIVersion("3.23")
        v6 = api_versions.APIVersion("2.0")
        v7 = api_versions.APIVersion("3.3")
        v8 = api_versions.APIVersion("4.0")
        v_null = api_versions.APIVersion()

        self.assertTrue(v2.matches(v1, v3))
        self.assertTrue(v2.matches(v1, v_null))
        self.assertTrue(v1.matches(v6, v2))
        self.assertTrue(v4.matches(v2, v7))
        self.assertTrue(v4.matches(v_null, v7))
        self.assertTrue(v4.matches(v_null, v8))
        self.assertFalse(v1.matches(v2, v3))
        self.assertFalse(v5.matches(v2, v4))
        self.assertFalse(v2.matches(v3, v1))

        self.assertRaises(ValueError, v_null.matches, v1, v3)

    def test_get_string(self):
        # get_string() round-trips the original string; null has none.
        v1_string = "3.23"
        v1 = api_versions.APIVersion(v1_string)
        self.assertEqual(v1_string, v1.get_string())

        self.assertRaises(ValueError,
                          api_versions.APIVersion().get_string)


class UpdateHeadersTestCase(utils.TestCase):
    """update_headers() only injects the header for real microversions."""

    def test_api_version_is_null(self):
        headers = {}
        api_versions.update_headers(headers, api_versions.APIVersion())
        self.assertEqual({}, headers)

    def test_api_version_is_major(self):
        # A plain major version ("X.0") also adds no header.
        headers = {}
        api_versions.update_headers(headers, api_versions.APIVersion("7.0"))
        self.assertEqual({}, headers)

    def test_api_version_is_not_null(self):
        api_version = api_versions.APIVersion("2.3")
        headers = {}
        api_versions.update_headers(headers, api_version)
        self.assertEqual(
            {"X-OpenStack-Nova-API-Version": api_version.get_string()},
            headers)


class GetAPIVersionTestCase(utils.TestCase):
    """get_api_version() input validation and delegation to APIVersion."""

    def test_get_available_client_versions(self):
        output = api_versions.get_available_major_versions()
        self.assertNotEqual([], output)

    def test_wrong_format(self):
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.get_api_version, "something_wrong")

    def test_wrong_major_version(self):
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.get_api_version, "1")

    @mock.patch("novaclient.api_versions.APIVersion")
    def test_only_major_part_is_presented(self, mock_apiversion):
        # A bare major version is expanded to "<major>.0".
        version = 7
        self.assertEqual(mock_apiversion.return_value,
                         api_versions.get_api_version(version))
        mock_apiversion.assert_called_once_with("%s.0" % str(version))

    @mock.patch("novaclient.api_versions.APIVersion")
    def test_major_and_minor_parts_is_presented(self, mock_apiversion):
        version = "2.7"
        self.assertEqual(mock_apiversion.return_value,
                         api_versions.get_api_version(version))
        mock_apiversion.assert_called_once_with(version)


class WrapsTestCase(utils.TestCase):
    """Behavior of the @api_versions.wraps version-dispatch decorator."""

    def _get_obj_with_vers(self, vers):
        # Fake API object exposing only the api_version attribute.
        return mock.MagicMock(api_version=api_versions.APIVersion(vers))

    def _side_effect_of_vers_method(self, *args, **kwargs):
        # Stand-in for VersionedMethod that records its constructor args.
        m = mock.MagicMock(start_version=args[1], end_version=args[2])
        m.name = args[0]
        return m

    @mock.patch("novaclient.utils.get_function_name")
    @mock.patch("novaclient.api_versions.VersionedMethod")
    def test_end_version_is_none(self, mock_versioned_method, mock_name):
        # Omitted end version defaults to "<major>.latest".
        func_name = "foo"
        mock_name.return_value = func_name
        mock_versioned_method.side_effect = self._side_effect_of_vers_method

        @api_versions.wraps("2.2")
        def foo(*args, **kwargs):
            pass

        foo(self._get_obj_with_vers("2.4"))

        mock_versioned_method.assert_called_once_with(
            func_name, api_versions.APIVersion("2.2"),
            api_versions.APIVersion("2.latest"), mock.ANY)

    @mock.patch("novaclient.utils.get_function_name")
    @mock.patch("novaclient.api_versions.VersionedMethod")
    def test_start_and_end_version_are_presented(self, mock_versioned_method,
                                                 mock_name):
        func_name = "foo"
        mock_name.return_value = func_name
        mock_versioned_method.side_effect = self._side_effect_of_vers_method

        @api_versions.wraps("2.2", "2.6")
        def foo(*args, **kwargs):
            pass

        foo(self._get_obj_with_vers("2.4"))

        mock_versioned_method.assert_called_once_with(
            func_name, api_versions.APIVersion("2.2"),
            api_versions.APIVersion("2.6"), mock.ANY)

    @mock.patch("novaclient.utils.get_function_name")
    @mock.patch("novaclient.api_versions.VersionedMethod")
    def test_api_version_doesnt_match(self, mock_versioned_method, mock_name):
        # Calling with a version outside [start, end] must raise.
        func_name = "foo"
        mock_name.return_value = func_name
        mock_versioned_method.side_effect = self._side_effect_of_vers_method

        @api_versions.wraps("2.2", "2.6")
        def foo(*args, **kwargs):
            pass

        self.assertRaises(exceptions.VersionNotFoundForAPIMethod,
                          foo, self._get_obj_with_vers("2.1"))

        mock_versioned_method.assert_called_once_with(
            func_name, api_versions.APIVersion("2.2"),
            api_versions.APIVersion("2.6"), mock.ANY)

    def test_define_method_is_actually_called(self):
        # The wrapped function must receive the original args untouched.
        checker = mock.MagicMock()

        @api_versions.wraps("2.2", "2.6")
        def some_func(*args, **kwargs):
            checker(*args, **kwargs)

        obj = self._get_obj_with_vers("2.4")
        some_args = ("arg_1", "arg_2")
        some_kwargs = {"key1": "value1", "key2": "value2"}

        some_func(obj, *some_args, **some_kwargs)

        checker.assert_called_once_with(*((obj,) + some_args), **some_kwargs)

    def test_cli_args_are_copied(self):
        # cliutils.arg metadata survives regardless of decorator order.
        @api_versions.wraps("2.2", "2.6")
        @cliutils.arg("name_1", help="Name of the something")
        @cliutils.arg("action_1", help="Some action")
        def some_func_1(cs, args):
            pass

        @cliutils.arg("name_2", help="Name of the something")
        @cliutils.arg("action_2", help="Some action")
        @api_versions.wraps("2.2", "2.6")
        def some_func_2(cs, args):
            pass

        args_1 = [(('name_1',), {'help': 'Name of the something'}),
                  (('action_1',), {'help': 'Some action'})]
        self.assertEqual(args_1, some_func_1.arguments)

        args_2 = [(('name_2',), {'help': 'Name of the something'}),
                  (('action_2',), {'help': 'Some action'})]
        self.assertEqual(args_2, some_func_2.arguments)


class DiscoverVersionTestCase(utils.TestCase):
    """discover_version(): negotiating a microversion with the server."""

    def setUp(self):
        super(DiscoverVersionTestCase, self).setUp()
        # Save the real client bounds; each test overwrites them and
        # _clear_fake_version restores them afterwards.
        self.orig_max = novaclient.API_MAX_VERSION
        self.orig_min = novaclient.API_MIN_VERSION
        self.addCleanup(self._clear_fake_version)

    def _clear_fake_version(self):
        novaclient.API_MAX_VERSION = self.orig_max
        novaclient.API_MIN_VERSION = self.orig_min

    def test_server_is_too_new(self):
        # Server range [2.4, 2.7] lies entirely above client max 2.3.
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = mock.MagicMock(
            version="2.7", min_version="2.4")
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.3")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.discover_version, fake_client,
                          api_versions.APIVersion('2.latest'))

    def test_server_is_too_old(self):
        # Server range [2.4, 2.7] lies entirely below client min 2.9.
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = mock.MagicMock(
            version="2.7", min_version="2.4")
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.10")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.9")
        self.assertRaises(exceptions.UnsupportedVersion,
                          api_versions.discover_version, fake_client,
                          api_versions.APIVersion('2.latest'))

    def test_server_end_version_is_the_latest_one(self):
        # Negotiation picks the smaller of server max and client max.
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = mock.MagicMock(
            version="2.7", min_version="2.4")
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.11")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertEqual(
            "2.7",
            api_versions.discover_version(
                fake_client,
                api_versions.APIVersion('2.latest')).get_string())

    def test_client_end_version_is_the_latest_one(self):
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = mock.MagicMock(
            version="2.16", min_version="2.4")
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.11")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertEqual(
            "2.11",
            api_versions.discover_version(
                fake_client,
                api_versions.APIVersion('2.latest')).get_string())

    def test_server_without_microversion(self):
        # Empty version fields mean a pre-microversion server: fall back
        # to "2.0".
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = mock.MagicMock(
            version='', min_version='')
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.11")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertEqual(
            "2.0",
            api_versions.discover_version(
                fake_client,
                api_versions.APIVersion('2.latest')).get_string())

    def test_server_without_microversion_and_no_version_field(self):
        # A real Version resource with no fields at all also maps to 2.0.
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = versions.Version(
            None, {})
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.11")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertEqual(
            "2.0",
            api_versions.discover_version(
                fake_client,
                api_versions.APIVersion('2.latest')).get_string())

    def test_server_without_microversion_rax_workaround(self):
        # Some deployments return no current version document at all
        # (None); treated the same as a pre-microversion server.
        fake_client = mock.MagicMock()
        fake_client.versions.get_current.return_value = None
        novaclient.API_MAX_VERSION = api_versions.APIVersion("2.11")
        novaclient.API_MIN_VERSION = api_versions.APIVersion("2.1")
        self.assertEqual(
            "2.0",
            api_versions.discover_version(
                fake_client,
                api_versions.APIVersion('2.latest')).get_string())
apache-2.0
ben1/Stronghold
src/actors.py
1
1368
'''
Actor is the base class for central characters in the game. It contains some
basic information as well as relationships to other characters (from their
perspective). This module also contains a number of core characters.
'''


class Actor:
    """A character in the game, tracking identity and per-target sentiment."""

    # Class-wide counter handing out a unique id to every actor created.
    nextId = 1

    def __init__(self, name, firstName):
        # Claim the next free id and advance the shared counter.
        self.id = Actor.nextId
        Actor.nextId += 1

        self.name = name
        self.firstName = firstName
        self.isPlayer = False
        self.introduced = False
        self.description = 'A non-descript person.'

        # Sentiment scores toward other actors, keyed by the other actor;
        # absent keys read as 0.
        self.feelingsFor = {}
        self.respectFor = {}

    def modFeelingsFor(self, actor, value):
        """Shift how this actor feels about *actor* by *value*."""
        updated = self.getFeelingsFor(actor) + value
        self.feelingsFor[actor] = updated

    def getFeelingsFor(self, actor):
        """Current feelings score for *actor* (0 if never recorded)."""
        return self.feelingsFor.get(actor, 0)

    def modRespectFor(self, actor, value):
        """Shift how much this actor respects *actor* by *value*."""
        updated = self.getRespectFor(actor) + value
        self.respectFor[actor] = updated

    def getRespectFor(self, actor):
        """Current respect score for *actor* (0 if never recorded)."""
        return self.respectFor.get(actor, 0)


class Player(Actor):
    """The actor controlled by the person playing the game."""

    def __init__(self):
        super().__init__('Some player', 'Player')
        self.isPlayer = True


class Emperor(Actor):
    """Core character: the emperor."""

    def __init__(self):
        super().__init__('Emperor Kanate', 'Emperor')


class Advisor(Actor):
    """Core character: the emperor's advisor."""

    def __init__(self):
        super().__init__('Johan Kratz', 'Johan')
mit
kbidarkar/robottelo
robottelo/ui/computeresource.py
2
23459
# -*- encoding: utf-8 -*- from robottelo import ssh from robottelo.constants import FILTER, FOREMAN_PROVIDERS from nailgun import entities from robottelo.ui.base import Base, UINoSuchElementError, UIError from robottelo.helpers import ProvisioningCheckError from robottelo.ui.locators import common_locators, locators, tab_locators from robottelo.ui.navigator import Navigator class ResourceProfileFormBase(object): """Base class for compute resources profiles forms""" _page = None # some fields are like two panel and to select from the left one to the # right as users groups and roles # please see how implemented in ResourceProfileFormEC2 for security_groups selector_fields = [] # some fields can be part of sections that can be added # like storage and networks, please how implemented in # ResourceProfileFormRHEV (implement network_interfaces and storage) group_fields_locators = {} def __init__(self, page): """Initiate compute resource profile form :type page: ComputeProfile :param page: The compute profile object ComputeProfile or ComputeResource """ self._page = page @property def page(self): """Return the current page ComputeResource or ComputeProfile""" return self._page def _clean_value(self, name, value): """Check some values and correct them accordingly""" if name in self.selector_fields: if not isinstance(value, (list, tuple)): value = [value] return value def _assign_locator_value(self, target, value): """Assign provided value to page element depending on the type of that element """ target_type = self.page.element_type(target) if (target_type == 'span' or target_type == 'select') and ' (' in value: # do all the necessary workaround self.page.click(target) # Typing entity value without parenthesis part self.page.assign_value( common_locators['select_list_search_box'], value.split(' (') [0]) # selecting Value by its full name (with parenthesis # part) self.page.click( common_locators['entity_select_list_vmware'] % value.split (' (')[0]) pass else: 
self.page.assign_value(target, value) def set_value(self, name, value): """Set the value of the corresponding field in UI""" locator_attr = '{0}_locator'.format(name) locator = getattr(self, locator_attr, None) if locator is None and name not in self.group_fields_locators: raise UIError('Field name: {0} not supported'.format(name)) value = self._clean_value(name, value) if name in self.selector_fields: self.page.configure_entity(value, locator) elif name in self.group_fields_locators: field_index = 0 group_fields_locators = self.group_fields_locators[name] add_node_locator = group_fields_locators['_add_node'] for group_field in value: if group_field is not None: for field_key, field_value in group_field.items(): field_locator = group_fields_locators.get(field_key) available_fields = self.page.find_elements( field_locator) if len(available_fields) - 1 < field_index: self.page.click(add_node_locator) available_fields = self.page.find_elements( field_locator) self._assign_locator_value( available_fields[field_index], field_value) field_index += 1 else: self._assign_locator_value(locator, value) def set_values(self, **kwargs): """Set the values of the corresponding fields in UI""" for key, value in kwargs.items(): self.set_value(key, value) def submit(self): """Press the submit form button""" self.page.click(common_locators['submit']) class ResourceProfileFormEC2(ResourceProfileFormBase): """Implement EC2 compute resource profile form""" flavor_locator = locators["resource.compute_profile.ec2_flavor"] image_locator = locators["resource.compute_profile.ec2_image"] subnet_locator = locators["resource.compute_profile.ec2_subnet"] managed_ip_locator = locators["resource.compute_profile.ec2_managed_ip"] availability_zone_locator = locators[ "resource.compute_profile.ec2_availability_zone"] security_groups_locator = FILTER['ec2_security_groups'] selector_fields = ['security_groups'] def _clean_value(self, name, value): """Check some values and correct them accordingly""" 
value = ResourceProfileFormBase._clean_value(self, name, value) if not value: if name == 'availability_zone': value = 'No preference' elif name == 'subnet': value = 'EC2' elif name == 'security_groups': value = [] return value class ResourceProfileFormRHEV(ResourceProfileFormBase): """Implement RHEV compute resource profile form""" cluster_locator = locators["resource.compute_profile.rhev_cluster"] template_locator = locators["resource.compute_profile.rhev_template"] cores_locator = locators["resource.compute_profile.rhev_cores"] memory_locator = locators["resource.compute_profile.rhev_memory"] image_locator = locators["resource.compute_profile.rhev_image"] group_fields_locators = dict( network_interfaces=dict( _add_node=locators[ "resource.compute_profile.interface_add_node"], name=locators["resource.compute_profile.rhev_interface_name"], network=locators["resource.compute_profile.rhev_interface_network"] ), storage=dict( _add_node=locators[ "resource.compute_profile.storage_add_node"], size=locators["resource.compute_profile.rhev_storage_size"], storage_domain=locators[ "resource.compute_profile.rhev_storage_domain"], preallocate_disk=locators[ "resource.compute_profile.rhev_storage_preallocate"], bootable=locators["resource.compute_profile.rhev_storage_bootable"] ) ) def set_values(self, **kwargs): """Set the values of the corresponding fields in UI""" # if template is the fields to set, it set in priority as, when # selecting a template, configuration data is loaded in UI template_key = 'template' template = kwargs.get(template_key) if template is not None: self.set_value(template_key, template) del kwargs[template_key] ResourceProfileFormBase.set_values(self, **kwargs) class ResourceProfileFormVMware(ResourceProfileFormBase): """Implement VMware compute resource profile form""" cpus_locator = locators["resource.compute_profile.vmware_cpus"] corespersocket_locator = locators[ "resource.compute_profile.vmware_corespersocket"] memory_locator = 
locators["resource.compute_profile.vmware_memory"] cluster_locator = locators["resource.compute_profile.vmware_cluster"] folder_locator = locators["resource.compute_profile.vmware_folder"] guest_os_locator = locators["resource.compute_profile.vmware_guest_os"] scsicontroller_locator = locators[ "resource.compute_profile.vmware_scsicontroller"] virtualhw_version_locator = locators[ "resource.compute_profile.vmware_virtualhw_version"] memory_hotadd_locator = locators[ "resource.compute_profile.vmware_memory_hotadd"] cpu_hotadd_locator = locators[ "resource.compute_profile.vmware_cpu_hotadd"] cdrom_drive_locator = locators[ "resource.compute_profile.vmware_cdrom_drive"] annotation_notes_locator = locators[ "resource.compute_profile.vmware_annotation_notes"] image_locator = locators["resource.compute_profile.rhev_image"] pool_locator = locators[ "resource.compute_profile.vmware_resource_pool"] group_fields_locators = dict( network_interfaces=dict( _add_node=locators[ "resource.compute_profile.interface_add_node"], name=locators["resource.compute_profile.vmware_interface_name"], network=locators[ "resource.compute_profile.vmware_interface_network"] ), storage=dict( _add_node=locators[ "resource.compute_profile.storage_add_node"], datastore=locators[ "resource.compute_profile.vmware_storage_datastore"], size=locators["resource.compute_profile.vmware_storage_size"], thin_provision=locators[ "resource.compute_profile.vmware_storage_thin_provision"], eager_zero=locators[ "resource.compute_profile.vmware_storage_eager_zero"], disk_mode=locators["resource.compute_profile.vmware_disk_mode"] ), ) _compute_resource_profiles = { FOREMAN_PROVIDERS['ec2']: ResourceProfileFormEC2, FOREMAN_PROVIDERS['rhev']: ResourceProfileFormRHEV, FOREMAN_PROVIDERS['vmware']: ResourceProfileFormVMware, } def get_compute_resource_profile(page, res_type=None): """Return the corresponding instance compute resource profile form object """ resource_profile_class = 
_compute_resource_profiles.get(res_type) if not resource_profile_class: raise UIError( 'Resource profile for resource type: {0}' ' not supported'.format(res_type) ) return resource_profile_class(page) class ComputeResource(Base): """Provides the CRUD functionality for Compute Resources.""" def navigate_to_entity(self): """Navigate to Compute Resource entity page""" Navigator(self.browser).go_to_compute_resources() def _search_locator(self): """Specify locator for Compute Resource entity search procedure""" return locators['resource.select_name'] def _configure_resource_provider( self, provider_type=None, parameter_list=None): """Provide configuration capabilities for compute resource provider. All values should be passed in absolute correspondence to UI. For example, we need to input some data to 'URL' field, select checkbox 'Console Passwords' and choose 'SPICE' value from select list, so next parameter list should be passed:: [ ['URL', libvirt_url, 'field'], ['Display Type', 'SPICE', 'select'], ['Console passwords', False, 'checkbox'] ] We have cases when it is necessary to push a button to populate values for select list. For such scenarios we have 'special select' parameter type. 
For example, for 'RHEV' provider, we need to click 'Load Datacenters' button to get values for 'Datacenter' list:: [ ['Description', 'My_Test', 'field'], ['URL', libvirt_url, 'field'], ['Username', 'admin', 'field'], ['Password', 'test', 'field'], ['X509 Certification Authorities', 'test', 'field'], ['Datacenter', 'test', 'special select'], ] """ if provider_type: self.select(locators['resource.provider_type'], provider_type) if parameter_list is None: return for parameter_name, parameter_value, parameter_type in parameter_list: if parameter_name.find('/') >= 0: _, parameter_name = parameter_name.split('/') param_locator = '.'.join(( 'resource', (parameter_name.lower()).replace(' ', '_') )) self.wait_until_element(locators[param_locator]) if parameter_type != 'special select': self.assign_value( locators[param_locator], parameter_value) else: button_locator = '.'.join(( 'resource', (parameter_name.lower()).replace(' ', '_'), 'button' )) self.click(locators[button_locator]) self.assign_value(locators[param_locator], parameter_value) def _configure_orgs(self, orgs, org_select): """Provides configuration capabilities for compute resource organization. 
The following format should be used:: orgs=['Aoes6V', 'JIFNPC'], org_select=True """ self.configure_entity( orgs, FILTER['cr_org'], tab_locator=tab_locators['tab_org'], entity_select=org_select ) def _configure_locations(self, locations, loc_select): """Provides configuration capabilities for compute resource location The following format should be used:: locations=['Default Location'], loc_select=True """ self.configure_entity( locations, FILTER['cr_loc'], tab_locator=tab_locators['tab_loc'], entity_select=loc_select ) def create(self, name, provider_type, parameter_list, orgs=None, org_select=None, locations=None, loc_select=None): """Creates a compute resource.""" self.click(locators['resource.new']) self.assign_value(locators['resource.name'], name) self._configure_resource_provider(provider_type, parameter_list) if locations: self._configure_locations(locations, loc_select) if orgs: self._configure_orgs(orgs, org_select) self.click(common_locators['submit']) def update(self, name, newname=None, parameter_list=None, orgs=None, org_select=None, locations=None, loc_select=None): """Updates compute resource entity.""" element = self.search(name) if element is None: raise UINoSuchElementError( 'Could not find the resource {0}'.format(name)) self.click(locators['resource.edit'] % name) self.wait_until_element(locators['resource.name']) if newname: self.assign_value(locators['resource.name'], newname) self._configure_resource_provider(parameter_list=parameter_list) if locations: self._configure_locations(locations, loc_select) if orgs: self._configure_orgs(orgs, org_select) self.click(common_locators['submit']) def search_container(self, cr_name, container_name): """Searches for specific container located in compute resource under 'Containers' tab """ self.search_and_click(cr_name) self.click(tab_locators['resource.tab_containers']) self.assign_value( locators['resource.filter_containers'], container_name) return self.wait_until_element( 
locators['resource.select_container'] % container_name) def list_vms(self, res_name): """Lists vms of a particular compute resource. Note: Currently lists only vms that show up on the first page. """ self.search_and_click(res_name) self.click(tab_locators['resource.tab_virtual_machines']) vm_elements = self.find_elements(locators['resource.vm_list']) return [vm.text for vm in vm_elements] def add_image(self, res_name, parameter_list): """Adds an image to a compute resource.""" self.search_and_click(res_name) self.click(locators['resource.image_add']) self.wait_until_element(locators['resource.image_name']) for parameter_name, parameter_value in parameter_list: param_locator = '_'.join(( 'resource.image', (parameter_name.lower()) )) self.assign_value(locators[param_locator], parameter_value) self.click(locators['resource.image_submit']) def list_images(self, res_name): """Lists images on Compute Resource. Note: Currently lists only images that show up on the first page. """ self.search_and_click(res_name) self.click(tab_locators['resource.tab_images']) image_elements = self.find_elements(locators['resource.image_list']) return [image.text for image in image_elements] def vm_action_toggle(self, res_name, vm_name, really): """Toggle power status of a vm on the compute resource.""" self.search_and_click(res_name) self.click(tab_locators['resource.tab_virtual_machines']) button = self.find_element( locators['resource.vm_power_button'] % vm_name ) self.click(button) if "Off" in button.text: self.handle_alert(really) def vm_delete(self, res_name, vm_name, really): """Removes a vm from the compute resource.""" self.search_and_click(res_name) self.click(tab_locators['resource.tab_virtual_machines']) for locator in [locators['resource.vm_delete_button_dropdown'], locators['resource.vm_delete_button']]: self.click(locator % vm_name) self.handle_alert(really) def search_vm(self, resource_name, vm_name): """Searches for existing Virtual machine from particular compute resource. 
It is necessary to use custom search here as we need to select compute resource tab before searching for particular Virtual machine and also, there is no search button to click """ self.search_and_click(resource_name) self.click(tab_locators['resource.tab_virtual_machines']) self.assign_value( locators['resource.search_filter'], vm_name) strategy, value = self._search_locator() return self.wait_until_element((strategy, value % vm_name)) def power_on_status(self, resource_name, vm_name): """Return the compute resource virtual machine power status :param resource_name: The compute resource name :param vm_name: the virtual machine name :return: on or off """ element = self.search_vm(resource_name, vm_name) if element is None: raise UIError( 'Could not find Virtual machine "{0}"'.format(vm_name)) return self.wait_until_element( locators['resource.power_status']).text.lower() def set_power_status(self, resource_name, vm_name, power_on=None): """Perform power on or power off for VM's :param bool power_on: True - for On, False - for Off """ status = None locator_status = locators['resource.power_status'] element = self.search_vm(resource_name, vm_name) if element is None: raise UIError( 'Could not find Virtual machine "{0}"'.format(vm_name)) button = self.find_element( locators['resource.vm_power_button'] ) if power_on is True: if 'On' not in button.text: raise UIError( 'Could not start VM {0}. VM is running'.format(vm_name) ) self.click(button) self.search_vm(resource_name, vm_name) status = self.wait_until_element(locator_status).text elif power_on is False: if 'Off' not in button.text: raise UIError( 'Could not stop VM {0}. 
VM is not running'.format(vm_name) ) self.click(button, wait_for_ajax=False) self.handle_alert(True) self.search_vm(resource_name, vm_name) status = self.wait_until_element(locator_status).text return status def select_profile(self, resource_name, profile_name): """Select the compute profile of a specific compute resource :param resource_name: Name of compute resource to select from the list :param profile_name: Name of profile that contains required compute resource (e.g. '2-Medium' or '1-Small') :return: resource type and the resource profile form element :returns: tuple """ resource_element = self.search(resource_name) resource_type = self.wait_until_element( locators['resource.resource_type'] % resource_name).text self.click(resource_element) self.click(tab_locators['resource.tab_compute_profiles']) self.click(locators["resource.compute_profile"] % profile_name) return (resource_type, self.wait_until_element(locators['profile.resource_form'])) def set_profile_values(self, resource_name, profile_name, **kwargs): """Fill and Submit the compute resource profile form configuration properties :param resource_name: Name of compute resource to select from the list :param profile_name: Name of profile that contains required compute resource (e.g. 
'2-Medium' or '1-Small') :param kwargs: the compute resource profile configuration properties fields to be set """ resource_type, _ = self.select_profile(resource_name, profile_name) resource_profile_form = get_compute_resource_profile( self, resource_type) resource_profile_form.set_values(**kwargs) resource_profile_form.submit() def host_provisioning_check(self, ip_addr): """Check the provisioned host status by pinging the ip of host and check to connect to ssh port :param ip_addr: IP address of the provisioned host :return: ssh command return code and stdout """ result = ssh.command( u'for i in {{1..60}}; do ping -c1 {0} && exit 0; sleep 20;' u' done; exit 1'.format(ip_addr)) if result.return_code != 0: raise ProvisioningCheckError( 'Failed to ping virtual machine Error:{0}'.format( result.stdout)) ssh_check = ssh.command( u'for i in {{1..60}}; do nc -vn {0} 22 <<< "" && exit 0; sleep 20;' u' done; exit 1'.format(ip_addr)) if ssh_check.return_code != 0: raise ProvisioningCheckError( 'Failed to connect to SSH port of the virtual machine') def check_image_os(self, os_name): """Check if the OS is present, if not create the required OS :param os_name: OS name to check, and create :return: Created os """ # Check if OS that image needs is present or no, If not create the OS result = entities.OperatingSystem().search(query={ u'search': u'title="{0}"'.format(os_name) }) if result: os = result[0] else: os = entities.OperatingSystem( name=os_name.split(' ')[0], major=os_name.split(' ')[1].split('.')[0], minor=os_name.split(' ')[1].split('.')[1], ).create() return os
gpl-3.0
eemirtekin/edx-platform
lms/djangoapps/instructor_analytics/tests/test_csvs.py
175
4757
""" Tests for analytics.csvs """ from django.test import TestCase from nose.tools import raises from instructor_analytics.csvs import create_csv_response, format_dictlist, format_instances class TestAnalyticsCSVS(TestCase): """ Test analytics rendering of csv files.""" def test_create_csv_response_nodata(self): header = ['Name', 'Email'] datarows = [] res = create_csv_response('robot.csv', header, datarows) self.assertEqual(res['Content-Type'], 'text/csv') self.assertEqual(res['Content-Disposition'], 'attachment; filename={0}'.format('robot.csv')) self.assertEqual(res.content.strip(), '"Name","Email"') def test_create_csv_response(self): header = ['Name', 'Email'] datarows = [['Jim', 'jim@edy.org'], ['Jake', 'jake@edy.org'], ['Jeeves', 'jeeves@edy.org']] res = create_csv_response('robot.csv', header, datarows) self.assertEqual(res['Content-Type'], 'text/csv') self.assertEqual(res['Content-Disposition'], 'attachment; filename={0}'.format('robot.csv')) self.assertEqual(res.content.strip(), '"Name","Email"\r\n"Jim","jim@edy.org"\r\n"Jake","jake@edy.org"\r\n"Jeeves","jeeves@edy.org"') def test_create_csv_response_empty(self): header = [] datarows = [] res = create_csv_response('robot.csv', header, datarows) self.assertEqual(res['Content-Type'], 'text/csv') self.assertEqual(res['Content-Disposition'], 'attachment; filename={0}'.format('robot.csv')) self.assertEqual(res.content.strip(), '') class TestAnalyticsFormatDictlist(TestCase): """ Test format_dictlist method """ def test_format_dictlist(self): dictlist = [ { 'label1': 'value-1,1', 'label2': 'value-1,2', 'label3': 'value-1,3', 'label4': 'value-1,4', }, { 'label1': 'value-2,1', 'label2': 'value-2,2', 'label3': 'value-2,3', 'label4': 'value-2,4', } ] features = ['label1', 'label4'] header, datarows = format_dictlist(dictlist, features) ideal_header = ['label1', 'label4'] ideal_datarows = [['value-1,1', 'value-1,4'], ['value-2,1', 'value-2,4']] self.assertEqual(header, ideal_header) self.assertEqual(datarows, 
ideal_datarows) def test_format_dictlist_empty(self): header, datarows = format_dictlist([], []) self.assertEqual(header, []) self.assertEqual(datarows, []) def test_create_csv_response(self): header = ['Name', 'Email'] datarows = [['Jim', 'jim@edy.org'], ['Jake', 'jake@edy.org'], ['Jeeves', 'jeeves@edy.org']] res = create_csv_response('robot.csv', header, datarows) self.assertEqual(res['Content-Type'], 'text/csv') self.assertEqual(res['Content-Disposition'], 'attachment; filename={0}'.format('robot.csv')) self.assertEqual(res.content.strip(), '"Name","Email"\r\n"Jim","jim@edy.org"\r\n"Jake","jake@edy.org"\r\n"Jeeves","jeeves@edy.org"') class TestAnalyticsFormatInstances(TestCase): """ test format_instances method """ class TestDataClass(object): """ Test class to generate objects for format_instances """ def __init__(self): self.a_var = 'aval' self.b_var = 'bval' self.c_var = 'cval' @property def d_var(self): """ accessor to see if they work too """ return 'dval' def setUp(self): super(TestAnalyticsFormatInstances, self).setUp() self.instances = [self.TestDataClass() for _ in xrange(5)] def test_format_instances_response(self): features = ['a_var', 'c_var', 'd_var'] header, datarows = format_instances(self.instances, features) self.assertEqual(header, ['a_var', 'c_var', 'd_var']) self.assertEqual(datarows, [[ 'aval', 'cval', 'dval', ] for _ in xrange(len(self.instances))]) def test_format_instances_response_noinstances(self): features = ['a_var'] header, datarows = format_instances([], features) self.assertEqual(header, features) self.assertEqual(datarows, []) def test_format_instances_response_nofeatures(self): header, datarows = format_instances(self.instances, []) self.assertEqual(header, []) self.assertEqual(datarows, [[] for _ in xrange(len(self.instances))]) @raises(AttributeError) def test_format_instances_response_nonexistantfeature(self): format_instances(self.instances, ['robot_not_a_real_feature'])
agpl-3.0
abligh/xen4.2-minideb
tools/xm-test/tests/create/11_create_concurrent_pos.py
42
1787
#!/usr/bin/python # Copyright (C) International Business Machines Corp., 2005 # Authors: Dan Smith <danms@us.ibm.com> from XmTestLib import * import time import random if ENABLE_HVM_SUPPORT: MAX_DOMS = getMaxHVMDomains() if MAX_DOMS > 50: MAX_DOMS = 50 else: MAX_DOMS = 50 MIN_DOMS = 5 MEM_PER_DOM = minSafeMem() domains = [] console = [] free_mem = int(getInfo("free_memory")) NUM_DOMS = free_mem / MEM_PER_DOM if NUM_DOMS < MIN_DOMS: SKIP("Need %i MB of RAM to start %i@%iMB domains! (%i MB avail)" % (MIN_DOMS * MEM_PER_DOM, MIN_DOMS, MEM_PER_DOM, free_mem)) if NUM_DOMS > MAX_DOMS: if verbose: print "*** %i doms is too many: capping at %i" % (NUM_DOMS, MAX_DOMS) NUM_DOMS = MAX_DOMS if verbose: print "Watch out! I'm trying to create %i DomUs!" % NUM_DOMS for d in range(0, NUM_DOMS): dom = XmTestDomain(name="11_create_%i" % d, extraConfig={"memory":MEM_PER_DOM}) try: cons = dom.start() except DomainError, e: if verbose: print str(e) FAIL("[%i] Failed to create domain" % d) try: cons.runCmd("ls") except ConsoleError, e: FAIL("[%i] Failed to attach console to %s" % (d, dom.getName())) domains.append(dom) console.append(cons) if verbose: print "[%i] Started %s" % (d, dom.getName()) # If we make it here, we will test several of the DomUs consoles for i in range(0,5): c = random.randint(0, NUM_DOMS-1) if verbose: print "Testing console of %s" % domains[c].getName() try: run = console[c].runCmd("ls") except ConsoleError, e: FAIL(str(e)) if run["return"] != 0: FAIL("'ls' returned invalid %i != 0" % run["return"])
gpl-2.0
sql-sith/cdc2015
simple_calculator_01_hard.py
1
1354
'''
Shows one way to implement a solution to Challenge Exercise 4 (simple calculator)

@author: sql.sith
'''

# Prompt function that works on both Python 2 (raw_input) and Python 3 (input).
try:
    _input = raw_input
except NameError:
    _input = input


def calcThis(int1, op, int2):
    """Apply the arithmetic operator ``op`` to ``int1`` and ``int2``.

    Supported operators are ``+``, ``-``, ``*`` and ``/``.  Division is
    performed in floating point so integer operands do not truncate.
    Any other operator prints a hint and raises ``Exception``.
    """
    if op == "+":
        return int1 + int2
    elif op == "-":
        return int1 - int2
    elif op == "*":
        return int1 * int2
    elif op == "/":
        # float() guards against Python 2 integer (floor) division.
        return float(int1) / float(int2)
    else:
        print("The only operators supported are: +-/*")
        raise Exception("Invalid operator: " + op)


def main():
    """Read one 'INT1 OPERATOR INT2' problem, parse it by hand, print the answer."""
    _debug = False

    print("Goal 1\n")
    typedInput = _input("Enter an arithmetic problem in the form INT1 OPERATOR INT2: ")

    # Get the first number, one hard way (there are several):
    # accumulate characters up to the first space.
    firstNumberString = ""
    for ch in typedInput:
        if ch != " ":
            firstNumberString += ch
        else:
            # this exits the loop:
            break
    firstNumber = int(firstNumberString)

    # Trim the first number and the first space; the operator is next.
    typedInput = typedInput[len(firstNumberString) + 1:]
    operator = typedInput[:1]

    # Trim the operator and the second space:
    typedInput = typedInput[2:]

    # What's left is the second number:
    secondNumber = int(typedInput)

    if _debug:
        print("firstNumber: " + str(firstNumber))
        print("operator: " + operator)
        print("secondNumber: " + str(secondNumber))

    print(calcThis(firstNumber, operator, secondNumber))


if __name__ == '__main__':
    # Guarding the script body lets calcThis be imported without prompting.
    main()
gpl-3.0
ted-gould/nova
nova/tests/unit/cells/test_cells_state_manager.py
17
11725
# Copyright (c) 2013 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests For CellStateManager """ import time import mock from oslo_config import cfg from oslo_db import exception as db_exc import six from nova.cells import state from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import objects from nova import test from nova import utils FAKE_COMPUTES = [ ('host1', 1024, 100, 0, 0), ('host2', 1024, 100, -1, -1), ('host3', 1024, 100, 1024, 100), ('host4', 1024, 100, 300, 30), ] FAKE_COMPUTES_N_TO_ONE = [ ('host1', 1024, 100, 0, 0), ('host1', 1024, 100, -1, -1), ('host2', 1024, 100, 1024, 100), ('host2', 1024, 100, 300, 30), ] # NOTE(alaski): It's important to have multiple types that end up having the # same memory and disk requirements. So two types need the same first value, # and two need the second and third values to add up to the same thing. 
FAKE_ITYPES = [ (0, 0, 0), (50, 12, 13), (50, 2, 4), (10, 20, 5), ] def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk): return objects.ComputeNode(host=host, memory_mb=total_mem, local_gb=total_disk, free_ram_mb=free_mem, free_disk_gb=free_disk) @classmethod def _fake_service_get_all_by_binary(cls, context, binary): def _node(host, total_mem, total_disk, free_mem, free_disk): return objects.Service(host=host, disabled=False) return [_node(*fake) for fake in FAKE_COMPUTES] @classmethod def _fake_compute_node_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES] @classmethod def _fake_compute_node_n_to_one_get_all(cls, context): return [_create_fake_node(*fake) for fake in FAKE_COMPUTES_N_TO_ONE] def _fake_cell_get_all(context): return [] def _fake_instance_type_all(context): def _type(mem, root, eph): return {'root_gb': root, 'ephemeral_gb': eph, 'memory_mb': mem} return [_type(*fake) for fake in FAKE_ITYPES] class TestCellsStateManager(test.NoDBTestCase): def setUp(self): super(TestCellsStateManager, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_get_all) self.stubs.Set(objects.ServiceList, 'get_by_binary', _fake_service_get_all_by_binary) self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all) self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all) def test_cells_config_not_found(self): self.flags(cells_config='no_such_file_exists.conf', group='cells') e = self.assertRaises(cfg.ConfigFilesNotFoundError, state.CellStateManager) self.assertEqual(['no_such_file_exists.conf'], e.config_files) @mock.patch.object(cfg.ConfigOpts, 'find_file') @mock.patch.object(utils, 'read_cached_file') def test_filemanager_returned(self, mock_read_cached_file, mock_find_file): mock_find_file.return_value = "/etc/nova/cells.json" mock_read_cached_file.return_value = (False, six.StringIO({})) self.flags(cells_config='cells.json', group='cells') manager = state.CellStateManager() 
self.assertIsInstance(manager, state.CellStateManagerFile) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_create, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_update, None, None, None) self.assertRaises(exception.CellsUpdateUnsupported, manager.cell_delete, None, None) def test_dbmanager_returned(self): self.assertIsInstance(state.CellStateManager(), state.CellStateManagerDB) def test_capacity_no_reserve(self): # utilize entire cell cap = self._capacity(0.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = cell_free_ram / 50 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 5 # 4 on host 3, 1 on host4 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_full_reserve(self): # reserve the entire cell. 
(utilize zero percent) cap = self._capacity(100.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) self.assertEqual(0, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 self.assertEqual(0, cap['disk_free']['units_by_mb'][str(sz)]) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = 1024 * sum(compute[4] for compute in FAKE_COMPUTES) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 10 # 10 from host 3 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 2 # 2 on host 3 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) def _get_state_manager(self, reserve_percent=0.0): self.flags(reserve_percent=reserve_percent, group='cells') return state.CellStateManager() def _capacity(self, reserve_percent): state_manager = self._get_state_manager(reserve_percent) my_state = state_manager.get_my_state() return my_state.capacities class TestCellsStateManagerNToOne(TestCellsStateManager): def setUp(self): super(TestCellsStateManagerNToOne, self).setUp() self.stubs.Set(objects.ComputeNodeList, 'get_all', _fake_compute_node_n_to_one_get_all) def test_capacity_part_reserve(self): # utilize half the cell's free capacity cap = self._capacity(50.0) cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES_N_TO_ONE) self.assertEqual(cell_free_ram, cap['ram_free']['total_mb']) cell_free_disk = (1024 
* sum(compute[4] for compute in FAKE_COMPUTES_N_TO_ONE)) self.assertEqual(cell_free_disk, cap['disk_free']['total_mb']) self.assertEqual(0, cap['ram_free']['units_by_mb']['0']) self.assertEqual(0, cap['disk_free']['units_by_mb']['0']) units = 6 # 6 from host 2 self.assertEqual(units, cap['ram_free']['units_by_mb']['50']) sz = 25 * 1024 units = 1 # 1 on host 2 self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)]) class TestCellStateManagerException(test.NoDBTestCase): @mock.patch.object(time, 'sleep') def test_init_db_error(self, mock_sleep): class TestCellStateManagerDB(state.CellStateManagerDB): def __init__(self): self._cell_data_sync = mock.Mock() self._cell_data_sync.side_effect = [db_exc.DBError(), []] super(TestCellStateManagerDB, self).__init__() test = TestCellStateManagerDB() mock_sleep.assert_called_once_with(30) self.assertEqual(2, test._cell_data_sync.call_count) class TestCellsGetCapacity(TestCellsStateManager): def setUp(self): super(TestCellsGetCapacity, self).setUp() self.capacities = {"ram_free": 1234} self.state_manager = self._get_state_manager() cell = models.Cell(name="cell_name") other_cell = models.Cell(name="other_cell_name") cell.capacities = self.capacities other_cell.capacities = self.capacities self.stubs.Set(self.state_manager, 'child_cells', {"cell_name": cell, "other_cell_name": other_cell}) def test_get_cell_capacity_for_all_cells(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.get_capacities() self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_the_parent_cell(self): self.stubs.Set(self.state_manager.my_cell_state, 'capacities', self.capacities) capacities = self.state_manager.\ get_capacities(self.state_manager.my_cell_state.name) self.assertEqual({"ram_free": 3702}, capacities) def test_get_cell_capacity_for_a_cell(self): self.assertEqual(self.capacities, self.state_manager.get_capacities(cell_name="cell_name")) def 
test_get_cell_capacity_for_non_existing_cell(self): self.assertRaises(exception.CellNotFound, self.state_manager.get_capacities, cell_name="invalid_cell_name") class FakeCellStateManager(object): def __init__(self): self.called = [] def _cell_data_sync(self, force=False): self.called.append(('_cell_data_sync', force)) class TestSyncDecorators(test.NoDBTestCase): def test_sync_before(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_before(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', False)], manager.called) def test_sync_after(self): manager = FakeCellStateManager() def test(inst, *args, **kwargs): self.assertEqual(manager, inst) self.assertEqual((1, 2, 3), args) self.assertEqual(dict(a=4, b=5, c=6), kwargs) return 'result' wrapper = state.sync_after(test) result = wrapper(manager, 1, 2, 3, a=4, b=5, c=6) self.assertEqual('result', result) self.assertEqual([('_cell_data_sync', True)], manager.called)
apache-2.0
JingJunYin/tensorflow
tensorflow/python/ops/sparse_ops.py
8
80714
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-short-docstring-punctuation """Sparse Tensor Representation. See the @{$python/sparse_ops} guide. @@SparseTensor @@SparseTensorValue @@sparse_to_dense @@sparse_tensor_to_dense @@sparse_to_indicator @@sparse_merge @@sparse_concat @@sparse_reorder @@sparse_reshape @@sparse_slice @@sparse_split @@sparse_retain @@sparse_reset_shape @@sparse_fill_empty_rows @@sparse_transpose @@sparse_reduce_max @@sparse_reduce_max_sparse @@sparse_reduce_sum @@sparse_reduce_sum_sparse @@sparse_add @@sparse_softmax @@sparse_tensor_dense_matmul @@sparse_maximum @@sparse_minimum """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import numbers import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import math_ops # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.gen_sparse_ops import * # pylint: 
from tensorflow.python.util import deprecation


def _convert_to_sparse_tensor(sp_input):
  """Convert `sp_input` to `SparseTensor` and return it.

  Args:
    sp_input: `SparseTensor` or `SparseTensorValue`.

  Returns:
    `sp_input` converted to `SparseTensor`.

  Raises:
    TypeError: if `sp_input` is neither `SparseTensor` nor
      `SparseTensorValue`.
  """
  if isinstance(sp_input, sparse_tensor.SparseTensorValue):
    return sparse_tensor.SparseTensor.from_value(sp_input)
  if not isinstance(sp_input, sparse_tensor.SparseTensor):
    raise TypeError("Input must be a SparseTensor.")
  return sp_input


def _convert_to_sparse_tensors(sp_inputs):
  """Convert `sp_inputs` to `SparseTensor` objects and return them.

  Args:
    sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`
      objects.

  Returns:
    `sp_inputs` converted to `SparseTensor` objects (a list for list input,
    a generator for tuple input).

  Raises:
    TypeError: if any item in `sp_inputs` is neither `SparseTensor` nor
      `SparseTensorValue`, or if `sp_inputs` is not a list or tuple.
  """
  if isinstance(sp_inputs, list):
    return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]
  if isinstance(sp_inputs, tuple):
    return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)
  raise TypeError("Inputs must be a list or tuple.")


# pylint: disable=protected-access
def sparse_concat(axis,
                  sp_inputs,
                  name=None,
                  expand_nonconcat_dim=False,
                  concat_dim=None):
  """Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of each sparse input.
  It is assumed that each inputs is a `SparseTensor` whose elements are ordered
  along increasing dimension number.

  If expand_nonconcat_dim is False, all inputs' shapes must match, except for
  the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes
  are allowed to vary among all inputs.

  The `indices`, `values`, and `shapes` lists must have the same length.

  If expand_nonconcat_dim is False, then the output shape is identical to the
  inputs', except along the concat dimension, where it is the sum of the
  inputs' sizes along that dimension.

  If expand_nonconcat_dim is True, then the output shape along the non-concat
  dimensions will be expand to be the largest among all inputs, and it is the
  sum of the inputs sizes along the concat dimension.

  The output elements will be resorted to preserve the sort order along
  increasing dimension number.

  This op runs in `O(M log M)` time, where `M` is the total number of non-empty
  values across all inputs. This is due to the need for an internal sort in
  order to concatenate efficiently across an arbitrary dimension.

  For example, if `axis = 1` and the inputs are

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  then the output will be

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Another example, if 'axis = 1' and the inputs are

      sp_inputs[0]: shape = [3, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [2, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  if expand_nonconcat_dim = False, this will result in an error. But if
  expand_nonconcat_dim = True, this will result in:

      shape = [3, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [2, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b    ]        [       ]   [b            ]
      [  c  ]                    [  c          ]

  Args:
    axis: Dimension to concatenate along. Must be in range [-rank, rank),
      where rank is the number of dimensions in each input `SparseTensor`.
    sp_inputs: List of `SparseTensor` to concatenate.
    name: A name prefix for the returned tensors (optional).
    expand_nonconcat_dim: Whether to allow the expansion in the non-concat
      dimensions. Defaulted to False.
    concat_dim: The old (deprecated) name for axis.

  Returns:
    A `SparseTensor` with the concatenated output.

  Raises:
    TypeError: If `sp_inputs` is not a list of `SparseTensor`.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis, "concat_dim",
                                                concat_dim)
  sp_inputs = _convert_to_sparse_tensors(sp_inputs)

  if len(sp_inputs) == 1:  # Degenerate case of one tensor.
    return sp_inputs[0]

  inds = [sp_input.indices for sp_input in sp_inputs]
  vals = [sp_input.values for sp_input in sp_inputs]
  shapes = [sp_input.dense_shape for sp_input in sp_inputs]

  if expand_nonconcat_dim:
    max_shape = math_ops.reduce_max(
        array_ops.concat(
            [array_ops.reshape(shape, [1, -1]) for shape in shapes], 0), 0)
    shapes = [
        array_ops.concat([
            max_shape[:axis],
            shape[-1:] if axis == -1 else shape[axis:axis + 1],
            [] if axis == -1 else max_shape[axis + 1:]
        ], 0) for shape in shapes
    ]

  output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(
      inds, vals, shapes, axis, name=name))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


def sparse_add(a, b, thresh=0):
  """Adds two tensors, at least one of each is a `SparseTensor`.

  If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`.
  If both arguments are `SparseTensor`s, this returns a `SparseTensor`. The
  order of arguments does not matter. Use vanilla `tf.add()` for adding two
  dense `Tensor`s.

  The shapes of the two operands must match: broadcasting is not supported.

  The indices of any input `SparseTensor` are assumed ordered in standard
  lexicographic order. If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  If both arguments are sparse, we perform "clipping" as follows. By default,
  if two values sum to zero at some index, the output `SparseTensor` would
  still include that particular location in its index, storing a zero in the
  corresponding value slot. To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`,
  its corresponding value and index would then not be included. In particular,
  `thresh == 0.0` (default) means everything is kept and actual thresholding
  happens only for a positive value.

  For example, suppose the logical sum of two sparse operands is (densified):

      [       2]
      [.1     0]
      [ 6   -.2]

  Then,

  * `thresh == 0` (the default): all 5 index/value pairs will be returned.
  * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three
    index/value pairs will be returned.
  * `thresh == 0.21`: .1, 0, and -.2 will vanish.

  Args:
    a: The first operand; `SparseTensor` or `Tensor`.
    b: The second operand; `SparseTensor` or `Tensor`. At least one operand
      must be sparse.
    thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
      output value/index pair takes space. Its dtype should match that of the
      values if they are real; if the latter are complex64/complex128, then
      the dtype should be float32/float64, correspondingly.

  Returns:
    A `SparseTensor` or a `Tensor`, representing the sum.

  Raises:
    TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
  """
  sparse_classes = (sparse_tensor.SparseTensor,
                    sparse_tensor.SparseTensorValue)
  if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
    raise TypeError("At least one input should be SparseTensor; do you mean to"
                    " use tf.add()?")

  if all(isinstance(inp, sparse_classes) for inp in [a, b]):
    a = _convert_to_sparse_tensor(a)
    b = _convert_to_sparse_tensor(b)
    thresh = ops.convert_to_tensor(
        thresh, dtype=a.values.dtype.real_dtype.base_dtype, name="thresh")
    output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(
        a.indices, a.values, a.dense_shape, b.indices, b.values, b.dense_shape,
        thresh))

    # Attempt to get output_shape statically.
    a.get_shape().assert_is_compatible_with(b.get_shape())
    static_shape = array_ops.broadcast_static_shape(a.get_shape(),
                                                    b.get_shape())
    if static_shape.is_fully_defined():
      output_shape = static_shape.as_list()

    return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)
  else:
    # Swap operands so that `a` is the SparseTensor and `b` the dense Tensor.
    if isinstance(b, sparse_classes):
      a, b = b, a
    return gen_sparse_ops._sparse_tensor_dense_add(a.indices, a.values,
                                                   a.dense_shape, b)
if isinstance(b, sparse_classes): a, b = b, a return gen_sparse_ops._sparse_tensor_dense_add( a.indices, a.values, a.dense_shape, b) def _sparse_cross(inputs, name=None): """Generates sparse cross from a list of sparse and dense tensors. For example, if the inputs are * inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" * inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: "d" [1, 0]: "e" * inputs[2]: Tensor [["f"], ["g"]] then the output will be: shape = [2, 2] [0, 0]: "a_X_d_X_f" [1, 0]: "b_X_e_X_g" [1, 1]: "c_X_e_X_g" Args: inputs: An iterable of `Tensor` or `SparseTensor`. name: Optional name for the op. Returns: A `SparseTensor` of type `string`. """ return _sparse_cross_internal(inputs=inputs, hashed_output=False, name=name) def _sparse_cross_hashed(inputs, num_buckets=0, hash_key=None, name=None): """Generates hashed sparse cross from a list of sparse and dense tensors. For example, if the inputs are * inputs[0]: SparseTensor with shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" * inputs[1]: SparseTensor with shape = [2, 1] [0, 0]: "d" [1, 0]: "e" * inputs[2]: Tensor [["f"], ["g"]] then the output will be: shape = [2, 2] [0, 0]: FingerprintCat64( Fingerprint64("f"), FingerprintCat64( Fingerprint64("d"), Fingerprint64("a"))) [1, 0]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("b"))) [1, 1]: FingerprintCat64( Fingerprint64("g"), FingerprintCat64( Fingerprint64("e"), Fingerprint64("c"))) Args: inputs: An iterable of `Tensor` or `SparseTensor`. num_buckets: An `int` that is `>= 0`. output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. hash_key: Integer hash_key that will be used by the `FingerprintCat64` function. If not given, will use a default key. name: Optional name for the op. Returns: A `SparseTensor` of type `int64`. 
""" return _sparse_cross_internal( inputs=inputs, hashed_output=True, num_buckets=num_buckets, hash_key=hash_key, name=name) _DEFAULT_HASH_KEY = 0xDECAFCAFFE def _sparse_cross_internal( inputs, hashed_output=False, num_buckets=0, hash_key=None, name=None): """See gen_sparse_ops._sparse_cross.""" if not isinstance(inputs, list): raise TypeError("Inputs must be a list") if not all(isinstance(i, sparse_tensor.SparseTensor) or isinstance(i, ops.Tensor) for i in inputs): raise TypeError("All inputs must be SparseTensors") sparse_inputs = [i for i in inputs if isinstance(i, sparse_tensor.SparseTensor)] dense_inputs = [i for i in inputs if not isinstance(i, sparse_tensor.SparseTensor)] indices = [sp_input.indices for sp_input in sparse_inputs] values = [sp_input.values for sp_input in sparse_inputs] shapes = [sp_input.dense_shape for sp_input in sparse_inputs] out_type = dtypes.int64 if hashed_output else dtypes.string internal_type = dtypes.string for i in range(len(values)): if values[i].dtype != dtypes.string: values[i] = math_ops.to_int64(values[i]) internal_type = dtypes.int64 for i in range(len(dense_inputs)): if dense_inputs[i].dtype != dtypes.string: dense_inputs[i] = math_ops.to_int64(dense_inputs[i]) internal_type = dtypes.int64 indices_out, values_out, shape_out = gen_sparse_ops._sparse_cross( indices=indices, values=values, shapes=shapes, dense_inputs=dense_inputs, hashed_output=hashed_output, num_buckets=num_buckets, hash_key=hash_key or _DEFAULT_HASH_KEY, out_type=out_type, internal_type=internal_type, name=name) return sparse_tensor.SparseTensor(indices_out, values_out, shape_out) def sparse_dense_cwise_add(sp_t, dense_t): """Adds up a SparseTensor and a dense Tensor, using these special rules: (1) Broadcasts the dense side to have the same shape as the sparse side, if eligible; (2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition. 
def sparse_reorder(sp_input, name=None):
  """Reorders a `SparseTensor` into the canonical, row-major ordering.

  Note that by convention, all sparse ops preserve the canonical ordering
  along increasing dimension number. The only time ordering can be violated
  is during manual manipulation of the indices and values to add entries.

  Reordering does not affect the shape of the `SparseTensor`.

  For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:

      [0, 3]: b
      [0, 1]: a
      [3, 1]: d
      [2, 0]: c

  then the output will be a `SparseTensor` of shape `[4, 5]` and
  `indices` / `values`:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Args:
    sp_input: The input `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same shape and non-empty values, but in
    canonical ordering.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  reordered_ind, reordered_val = (gen_sparse_ops._sparse_reorder(
      sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))

  # Prefer the statically known shape when available so shape inference keeps
  # working downstream.
  if sp_input.get_shape().is_fully_defined():
    dense_shape = sp_input.get_shape().as_list()
  else:
    dense_shape = array_ops.identity(sp_input.dense_shape)

  return sparse_tensor.SparseTensor(reordered_ind, reordered_val, dense_shape)


def sparse_reshape(sp_input, shape, name=None):
  """Reshapes a `SparseTensor` to represent values in a new dense shape.

  This operation has the same semantics as `reshape` on the represented dense
  tensor. The indices of non-empty values in `sp_input` are recomputed based
  on the new dense shape, and a new `SparseTensor` is returned containing the
  new indices and new shape. The order of non-empty values in `sp_input` is
  unchanged.

  If one component of `shape` is the special value -1, the size of that
  dimension is computed so that the total dense size remains constant. At most
  one component of `shape` can be -1. The number of dense elements implied by
  `shape` must be the same as the number of dense elements originally
  represented by `sp_input`.

  For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:

      [0, 0, 0]: a
      [0, 0, 1]: b
      [0, 1, 0]: c
      [1, 0, 0]: d
      [1, 2, 3]: e

  and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of
  shape `[9, 4]` and `indices` / `values`:

      [0, 0]: a
      [0, 1]: b
      [1, 2]: c
      [4, 2]: d
      [8, 1]: e

  Args:
    sp_input: The input `SparseTensor`.
    shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
      represented `SparseTensor`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A `SparseTensor` with the same non-empty values but with indices
    calculated by the new dense shape.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If argument `shape` requests a `SparseTensor` with a different
      number of elements than `sp_input`.
    ValueError: If `shape` has more than one inferred (== -1) dimension.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  shape = math_ops.cast(shape, dtype=dtypes.int64)

  with ops.name_scope(name, "SparseReshape", [sp_input]) as name:
    reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(
        sp_input.indices, sp_input.dense_shape, shape, name=name)

    # When both the requested shape and the input shape are statically known,
    # resolve any -1 dimension and validate the element count eagerly instead
    # of deferring the error to graph execution.
    reshaped_shape_const = tensor_util.constant_value(shape)
    if (reshaped_shape_const is not None and
        sp_input.get_shape().is_fully_defined()):
      num_implied = sum((dim == -1) for dim in reshaped_shape_const)
      if num_implied > 1:
        raise ValueError("At most one dimension can be inferred (-1). "
                         "Found: %s" % reshaped_shape_const)
      original_reshaped_shape = list(reshaped_shape_const)  # Copy.
      in_shape_size = np.prod(sp_input.get_shape().as_list())
      if num_implied:
        implied_idx = original_reshaped_shape.index(-1)
        non_implied_idx = (
            original_reshaped_shape[:implied_idx] +
            original_reshaped_shape[implied_idx + 1:])
        reshaped_shape_const[implied_idx] = (
            in_shape_size // np.prod(non_implied_idx))
      reshaped_size = np.prod(reshaped_shape_const)
      if reshaped_size != in_shape_size:
        raise ValueError(
            "Cannot reshape a tensor with %d elements to shape %s "
            "(%d elements)." %
            (in_shape_size, original_reshaped_shape, reshaped_size))
      reshaped_shape = reshaped_shape_const

    return sparse_tensor.SparseTensor(reshaped_ind,
                                      array_ops.identity(sp_input.values),
                                      reshaped_shape)


# TODO(aselle): Remove keyword required once for 1.0 final
class KeywordRequired(object):
  """Sentinel default value that forces callers to use keyword arguments."""

  def __repr__(self):
    # This is needed to make documentation without fully qualified module
    # paths.
    return "KeywordRequired()"


def sparse_split(keyword_required=KeywordRequired(),
                 sp_input=None,
                 num_split=None,
                 axis=None,
                 name=None,
                 split_dim=None):
  """Split a `SparseTensor` into `num_split` tensors along `axis`.

  If the `sp_input.dense_shape[axis]` is not an integer multiple of
  `num_split` each slice starting from 0:`shape[axis] % num_split` gets extra
  one dimension. For example, if `axis = 1` and `num_split = 2` and the
  input is:

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      output_tensor[0] =
      [    a   ]
      [b c     ]

      output_tensor[1] =
      [ d e  ]
      [      ]

  Args:
    keyword_required: Python 2 standin for * (temporary for argument reorder)
    sp_input: The `SparseTensor` to split.
    num_split: A Python integer. The number of ways to split.
    axis: A 0-D `int32` `Tensor`. The dimension along which to split.
    name: A name for the operation (optional).
    split_dim: Deprecated old name for axis.

  Returns:
    `num_split` `SparseTensor` objects resulting from splitting `value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If the deprecated `split_dim` and `axis` are both non None.
  """
  if not isinstance(keyword_required, KeywordRequired):
    raise ValueError("Keyword arguments are required for this function.")
  if sp_input is None:
    raise ValueError("sp_input is required")
  if num_split is None:
    raise ValueError("num_split is required")
  if axis is None:
    raise ValueError("axis is required")
  axis = deprecation.deprecated_argument_lookup("axis", axis, "split_dim",
                                                split_dim)
  sp_input = _convert_to_sparse_tensor(sp_input)

  output_inds, output_vals, output_shapes = (gen_sparse_ops._sparse_split(
      axis,
      sp_input.indices,
      sp_input.values,
      sp_input.dense_shape,
      num_split,
      name=name))
  sparse_tensors = []
  for i in range(0, num_split):
    sparse_tensors.append(
        sparse_tensor.SparseTensor(output_inds[i], output_vals[i],
                                   output_shapes[i]))
  return sparse_tensors
def sparse_slice(sp_input, start, size, name=None):
  """Slice a `SparseTensor` based on the `start` and `size`.

  For example, if the input is

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
      [    a  ]
      [b c    ]

      sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
      [ d e  ]
      [      ]

  Args:
    sp_input: The `SparseTensor` to split.
    start: 1-D. tensor represents the start of the slice.
    size: 1-D. tensor represents the size of the slice.
    name: A name for the operation (optional).

  Returns:
    A `SparseTensor` objects resulting from slicing.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  start = ops.convert_to_tensor(start, dtypes.int64)
  size = ops.convert_to_tensor(size, dtypes.int64)

  with ops.name_scope(name, "SparseSlice", [sp_input]) as name:
    output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(
        sp_input.indices,
        sp_input.values,
        sp_input.dense_shape,
        start,
        size,
        name=name)

    return sparse_tensor.SparseTensor(output_indices, output_values,
                                      output_shape)


def sparse_to_dense(sparse_indices,
                    output_shape,
                    sparse_values,
                    default_value=0,
                    validate_indices=True,
                    name=None):
  """Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```python
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`. If `sparse_values`
  is a scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is True, these properties
  are checked during execution.

  Args:
    sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
      `sparse_indices[i]` contains the complete index where `sparse_values[i]`
      will be placed.
    output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
      of the dense output tensor.
    sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
      `sparse_indices`, or a scalar value to be used for all sparse indices.
    default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
      to set for indices not specified in `sparse_indices`. Defaults to zero.
    validate_indices: A boolean value. If True, indices are checked to make
      sure they are sorted in lexicographic order and that there are no
      repeats.
    name: A name for the operation (optional).

  Returns:
    Dense `Tensor` of shape `output_shape`. Has the same type as
    `sparse_values`.
  """
  return gen_sparse_ops._sparse_to_dense(
      sparse_indices,
      output_shape,
      sparse_values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)


def sparse_reduce_max(sp_input, axis=None, keep_dims=False,
                      reduction_axes=None):
  """Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  For example:

  ```python
  # 'x' represents [[1, ?, 2]
  #                 [?, 3, ?]]
  # where ? is implicitly-zero.
  tf.sparse_reduce_max(x) ==> 3
  tf.sparse_reduce_max(x, 0) ==> [1, 3, 2]
  tf.sparse_reduce_max(x, 1) ==> [2, 3]  # Can also use -1 as the axis.
  tf.sparse_reduce_max(x, 1, keep_dims=True) ==> [[2], [3]]
  tf.sparse_reduce_max(x, [0, 1]) ==> 3
  ```

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis.

  Returns:
    The reduced Tensor.
  """
  return gen_sparse_ops.sparse_reduce_max(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)
def sparse_reduce_max_sparse(sp_input, axis=None, keep_dims=False,
                             reduction_axes=None):
  """Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`. In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis

  Returns:
    The reduced SparseTensor.
  """
  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_max_sparse(
          sp_input.indices, sp_input.values, sp_input.dense_shape,
          math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
                      reduction_axes=None):
  """Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  similar to the indexing rules in Python.

  For example:

  ```python
  # 'x' represents [[1, ?, 1]
  #                 [?, 1, ?]]
  # where ? is implicitly-zero.
  tf.sparse_reduce_sum(x) ==> 3
  tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]
  tf.sparse_reduce_sum(x, 1) ==> [2, 1]  # Can also use -1 as the axis.
  tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]
  tf.sparse_reduce_sum(x, [0, 1]) ==> 3
  ```

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis.

  Returns:
    The reduced Tensor.
  """
  return gen_sparse_ops.sparse_reduce_sum(
      sp_input.indices, sp_input.values, sp_input.dense_shape,
      math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims)


def sparse_reduce_sum_sparse(sp_input, axis=None, keep_dims=False,
                             reduction_axes=None):
  """Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `reduction_axes`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned. Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    sp_input: The SparseTensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; list or scalar. If `None` (the
      default), reduces all dimensions.
    keep_dims: If true, retain reduced dimensions with length 1.
    reduction_axes: Deprecated name of axis

  Returns:
    The reduced SparseTensor.
  """
  output_ind, output_val, output_shape = (
      gen_sparse_ops.sparse_reduce_sum_sparse(
          sp_input.indices, sp_input.values, sp_input.dense_shape,
          math_ops._ReductionDims(sp_input, axis, reduction_axes), keep_dims))

  return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)


def sparse_tensor_to_dense(sp_input,
                           default_value=0,
                           validate_indices=True,
                           name=None):
  """Converts a `SparseTensor` into a dense tensor.

  This op is a convenience wrapper around `sparse_to_dense` for
  `SparseTensor`s.

  For example, if `sp_input` has shape `[3, 5]` and non-empty string values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c

  and `default_value` is `x`, then the output will be a dense `[3, 5]`
  string tensor with values:

      [[x a x b x]
       [x x x x x]
       [c x x x x]]

  Indices must be without repeats. This is only tested if validate_indices
  is True.

  Args:
    sp_input: The input `SparseTensor`.
    default_value: Scalar value to set for indices not specified in
      `sp_input`. Defaults to zero.
    validate_indices: A boolean value. If `True`, indices are checked to make
      sure they are sorted in lexicographic order and that there are no
      repeats.
    name: A name prefix for the returned tensors (optional).

  Returns:
    A dense tensor with shape `sp_input.dense_shape` and values specified by
    the non-empty values in `sp_input`. Indices not in `sp_input` are assigned
    `default_value`.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)
  return sparse_to_dense(
      sp_input.indices,
      sp_input.dense_shape,
      sp_input.values,
      default_value=default_value,
      validate_indices=validate_indices,
      name=name)


def sparse_to_indicator(sp_input, vocab_size, name=None):
  """Converts a `SparseTensor` of ids into a dense bool indicator tensor.

  The last dimension of `sp_input.indices` is discarded and replaced with
  the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,
  then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where

      output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True

  and False elsewhere in `output`.

  For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:

      [0, 0, 0]: 0
      [0, 1, 0]: 10
      [1, 0, 3]: 103
      [1, 1, 2]: 150
      [1, 1, 3]: 149
      [1, 1, 4]: 150
      [1, 2, 1]: 121

  and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool
  tensor with False everywhere except at positions

      (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),
      (1, 2, 121).

  Note that repeats are allowed in the input SparseTensor.
  This op is useful for converting `SparseTensor`s into dense formats for
  compatibility with ops that expect dense tensors.

  The input `SparseTensor` must be in row-major order.

  Args:
    sp_input: A `SparseTensor` with `values` property of type `int32` or
      `int64`.
    vocab_size: A scalar int64 Tensor (or Python int) containing the new size
      of the last dimension, `all(0 <= sp_input.values < vocab_size)`.
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense bool indicator tensor representing the indices with specified
    value.

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  with ops.name_scope(name, "SparseToIndicator", [sp_input]) as name:
    num_entries = array_ops.shape(sp_input.indices)[0]
    new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)
    sp_values = sparse_tensor.SparseTensor(sp_input.indices, new_values,
                                           sp_input.dense_shape)

    sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)

    # validate_indices may be False because we allow duplicates in
    # new_indices: repeated indices are allowed when creating an indicator
    # matrix.
    return sparse_tensor_to_dense(
        sp_new, default_value=False, validate_indices=False, name=name)
    # Densify; validate_indices=False because duplicate indices are legal
    # here (repeats in the input simply set the same output position to True).
    return sparse_tensor_to_dense(
        sp_new, default_value=False, validate_indices=False, name=name)


def sparse_merge(sp_ids, sp_values, vocab_size, name=None,
                 already_sorted=False):
  """Combines a batch of feature ids and values into a single `SparseTensor`.

  The most common use case for this function occurs when feature ids and
  their corresponding values are stored in `Example` protos on disk.
  `parse_example` will return a batch of ids and a batch of values, and this
  function joins them into a single logical `SparseTensor` for use in
  functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.

  The `SparseTensor` returned by this function has the following properties:

    - `indices` is equivalent to `sp_ids.indices` with the last
      dimension discarded and replaced with `sp_ids.values`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn, vocab_size]`.

  For example, consider the following feature vectors:

  ```python
    vector1 = [-3, 0, 0, 0, 0, 0]
    vector2 = [ 0, 1, 0, 4, 1, 0]
    vector3 = [ 5, 0, 0, 9, 0, 0]
  ```

  These might be stored sparsely in the following Example protos by storing
  only the feature ids (column number if the vectors are treated as a matrix)
  of the non-zero elements and the corresponding values:

  ```python
    examples = [Example(features={
                    "ids": Feature(int64_list=Int64List(value=[0])),
                    "values": Feature(float_list=FloatList(value=[-3]))}),
                Example(features={
                    "ids": Feature(int64_list=Int64List(value=[1, 4, 3])),
                    "values": Feature(float_list=FloatList(value=[1, 1, 4]))}),
                Example(features={
                    "ids": Feature(int64_list=Int64List(value=[0, 3])),
                    "values": Feature(float_list=FloatList(value=[5, 9]))})]
  ```

  The result of calling parse_example on these examples will produce a
  dictionary with entries for "ids" and "values". Passing those two objects
  to this function along with vocab_size=6, will produce a `SparseTensor`
  that sparsely represents all three instances. Namely, the `indices`
  property will contain the coordinates of the non-zero entries in the
  feature matrix (the first dimension is the row number in the matrix, i.e.,
  the index within the batch, and the second dimension is the column number,
  i.e., the feature id); `values` will contain the actual values. `shape`
  will be the shape of the original matrix, i.e., (3, 6). For our example
  above, the output will be equal to:

  ```python
    SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],
                 values=[-3, 1, 4, 1, 5, 9],
                 dense_shape=[3, 6])
  ```

  This method generalizes to higher-dimensions by simply providing a list for
  both the sp_ids as well as the vocab_size.
  In this case the resulting `SparseTensor` has the following properties:

    - `indices` is equivalent to `sp_ids[0].indices` with the last dimension
      discarded and concatenated with
      `sp_ids[0].values, sp_ids[1].values, ...`.
    - `values` is simply `sp_values.values`.
    - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then
      `output.shape = [D0, D1, ..., Dn] + vocab_size`.

  Args:
    sp_ids: A single `SparseTensor` with `values` property of type `int32`
      or `int64` or a Python list of such `SparseTensor`s or a list thereof.
    sp_values: A `SparseTensor` of any type.
    vocab_size: A scalar `int64` Tensor (or Python int) containing the new
      size of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.
      Or a list thereof with `all(0 <= sp_ids[i].values < vocab_size[i])` for
      all `i`.
    name: A name prefix for the returned tensors (optional)
    already_sorted: A boolean to specify whether the per-batch values in
     `sp_values` are already sorted. If so skip sorting, False by default
     (optional).

  Returns:
    A `SparseTensor` compactly representing a batch of feature ids and
    values, useful for passing to functions that expect such a
    `SparseTensor`.

  Raises:
    TypeError: If `sp_values` is not a `SparseTensor`. Or if `sp_ids` is
      neither a `SparseTensor` nor a list thereof. Or if `vocab_size` is not a
      `Tensor` or a Python int and `sp_ids` is a `SparseTensor`. Or if
      `vocab_size` is not a `Tensor` or Python int, or a list thereof, and
      `sp_ids` is a list.
    ValueError: If `sp_ids` and `vocab_size` are lists of different lengths.
  """
  # Normalize the single-SparseTensor case to the list form so the merge
  # logic below only has to deal with lists.
  if isinstance(sp_ids, sparse_tensor.SparseTensorValue) or isinstance(
      sp_ids, sparse_tensor.SparseTensor):
    sp_ids = [sp_ids]
    if not (isinstance(vocab_size, ops.Tensor) or
            isinstance(vocab_size, numbers.Integral)):
      raise TypeError("vocab_size has to be a Tensor or Python int. Found %s"
                      % type(vocab_size))
    vocab_size = [vocab_size]
  else:
    # NOTE(review): `collections.Iterable` is a deprecated alias removed in
    # Python 3.10; presumably this should be `collections.abc.Iterable` on
    # modern interpreters — confirm against the file's import block.
    if not isinstance(sp_ids, collections.Iterable):
      raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
                      "Found %s" % type(sp_ids))
    if not isinstance(vocab_size, collections.Iterable):
      raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
                      "Found %s" % type(vocab_size))
    for dim in vocab_size:
      if not (isinstance(dim, ops.Tensor) or
              isinstance(dim, numbers.Integral)):
        raise TypeError(
            "vocab_size has to be a list of Tensors or Python ints. Found %s"
            % type(dim))
  if len(sp_ids) != len(vocab_size):
    raise ValueError("sp_ids and vocab_size have to have equal lengths.")

  with ops.name_scope(name, "SparseMerge", [sp_ids, sp_values]):
    sp_ids = [_convert_to_sparse_tensor(sp_ids_dim) for sp_ids_dim in sp_ids]
    sp_values = _convert_to_sparse_tensor(sp_values)

    # Collect each id tensor as an int64 column vector so they can be
    # concatenated onto the preserved index columns below.
    ids = []
    for sp_ids_dim in sp_ids:
      ids_dim = sp_ids_dim.values
      if sp_ids_dim.dtype != dtypes.int64:
        ids_dim = math_ops.cast(ids_dim, dtypes.int64)
      ids += [array_ops.expand_dims(ids_dim, axis=1)]

    vocab_size = [math_ops.cast(x, dtypes.int64) for x in vocab_size]

    # Slice off the last dimension of indices, then tack on the ids
    indices_columns_to_preserve = sp_ids[0].indices[:, :-1]
    new_indices = array_ops.concat([indices_columns_to_preserve] + ids, 1)

    new_values = sp_values.values
    new_shape = array_ops.concat([sp_ids[0].dense_shape[:-1], vocab_size], 0)

    result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)
    # The concatenated ids are not necessarily in canonical order; reorder
    # unless the caller promises they already are.
    return result if already_sorted else sparse_reorder(result)
""" sp_input = _convert_to_sparse_tensor(sp_input) to_retain = ops.convert_to_tensor(to_retain) # Shape checking, if shape is known at graph construction time retain_shape = to_retain.get_shape() retain_shape.assert_has_rank(1) sp_input.values.get_shape()[0].merge_with(retain_shape[0]) where_true = array_ops.reshape(array_ops.where(to_retain), [-1]) new_indices = array_ops.gather(sp_input.indices, where_true) new_values = array_ops.gather(sp_input.values, where_true) return sparse_tensor.SparseTensor(new_indices, new_values, array_ops.identity(sp_input.dense_shape)) def sparse_reset_shape(sp_input, new_shape=None): """Resets the shape of a `SparseTensor` with indices and values unchanged. If `new_shape` is None, returns a copy of `sp_input` with its shape reset to the tight bounding box of `sp_input`. This will be a shape consisting of all zeros if sp_input has no values. If `new_shape` is provided, then it must be larger or equal in all dimensions compared to the shape of `sp_input`. When this condition is met, the returned SparseTensor will have its shape reset to `new_shape` and its indices and values unchanged from that of `sp_input.` For example: Consider a `sp_input` with shape [2, 3, 5]: [0, 0, 1]: a [0, 1, 0]: b [0, 2, 2]: c [1, 0, 3]: d - It is an error to set `new_shape` as [3, 7] since this represents a rank-2 tensor while `sp_input` is rank-3. This is either a ValueError during graph construction (if both shapes are known) or an OpError during run time. - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or equal in every dimension compared to the original shape [2, 3, 5]. - On the other hand, setting new_shape as [2, 3, 4] is also an error: The third dimension is smaller than the original shape [2, 3, 5] (and an `InvalidArgumentError` will be raised). - If `new_shape` is None, the returned SparseTensor will have a shape [2, 3, 4], which is the tight bounding box of `sp_input`. Args: sp_input: The input `SparseTensor`. 
def sparse_reset_shape(sp_input, new_shape=None):
  """Resets the shape of a `SparseTensor` with indices and values unchanged.

  If `new_shape` is None, returns a copy of `sp_input` with its shape reset
  to the tight bounding box of `sp_input`. This will be a shape consisting of
  all zeros if sp_input has no values.

  If `new_shape` is provided, then it must be larger or equal in all
  dimensions compared to the shape of `sp_input`. When this condition is met,
  the returned SparseTensor will have its shape reset to `new_shape` and its
  indices and values unchanged from that of `sp_input.`

  For example:

    Consider a `sp_input` with shape [2, 3, 5]:

      [0, 0, 1]: a
      [0, 1, 0]: b
      [0, 2, 2]: c
      [1, 0, 3]: d

    - It is an error to set `new_shape` as [3, 7] since this represents a
      rank-2 tensor while `sp_input` is rank-3. This is either a ValueError
      during graph construction (if both shapes are known) or an OpError
      during run time.

    - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger
      or equal in every dimension compared to the original shape [2, 3, 5].

    - On the other hand, setting new_shape as [2, 3, 4] is also an error: The
      third dimension is smaller than the original shape [2, 3, 5] (and an
      `InvalidArgumentError` will be raised).

    - If `new_shape` is None, the returned SparseTensor will have a shape
      [2, 3, 4], which is the tight bounding box of `sp_input`.

  Args:
    sp_input: The input `SparseTensor`.
    new_shape: None or a vector representing the new shape for the returned
      `SparseTensor`.

  Returns:
    A `SparseTensor` indices and values unchanged from `input_sp`. Its shape
    is `new_shape` if that is set. Otherwise it is the tight bounding box of
    `input_sp`

  Raises:
    TypeError: If `sp_input` is not a `SparseTensor`.
    ValueError: If `new_shape` represents a tensor with a different rank from
      that of `sp_input` (if shapes are known when graph is constructed).
    ValueError:  If `new_shape` is determined during graph build to have
      dimension sizes that are too small.
    OpError:
      - If `new_shape` has dimension sizes that are too small.
      - If shapes are not known during graph construction time, and during run
        time it is found out that the ranks do not match.
  """
  sp_input = _convert_to_sparse_tensor(sp_input)

  in_indices = array_ops.identity(sp_input.indices)
  in_values = array_ops.identity(sp_input.values)
  in_shape = array_ops.identity(sp_input.dense_shape)

  if new_shape is None:
    # Tight bounding box: max index per dimension, plus one. For an empty
    # SparseTensor reduce_max yields the dtype min, so clamp at zero.
    dim_low_bound = math_ops.reduce_max(in_indices, axis=0)
    output_shape_tensor = math_ops.maximum(
        array_ops.constant(0, dtype=dtypes.int64),
        math_ops.add(dim_low_bound, array_ops.ones_like(in_shape)))
  else:
    output_shape_tensor = ops.convert_to_tensor(new_shape)
    output_shape_tensor.get_shape().assert_has_rank(1)
    output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)
    # For cases when shape is known during graph construction, this catches
    # the error before the sparse_tensor.SparseTensor catches it.
    output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])

    output_shape_tensor_const = tensor_util.constant_value(
        output_shape_tensor)
    # For cases where all shapes are known during graph construction
    if (output_shape_tensor_const is not None and
        sp_input.get_shape().is_fully_defined()):
      in_shape_const = np.array(sp_input.get_shape().as_list())
      if not np.all(in_shape_const <= output_shape_tensor_const):
        # BUG FIX: the format arguments were previously swapped — the
        # new_shape slot printed sp_input's shape and vice versa.
        raise ValueError(
            "Requested new_shape should have dimension sizes >= sp_input.shape."
            " Found new_shape (%s), sp_input.shape (%s)." %
            (output_shape_tensor_const, in_shape_const))
      output_shape_tensor = output_shape_tensor_const
    else:
      # For cases where shape is not known during graph construction.
      # Runtime checks: equal ranks, and every new dimension >= the old one.
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_equal(
              array_ops.shape(in_shape),
              array_ops.shape(output_shape_tensor))],
          output_shape_tensor)
      output_shape_tensor = control_flow_ops.with_dependencies(
          [check_ops.assert_less_equal(in_shape, output_shape_tensor)],
          output_shape_tensor)

  return sparse_tensor.SparseTensor(in_indices, in_values,
                                    output_shape_tensor)
""" sp_input = _convert_to_sparse_tensor(sp_input) with ops.name_scope(name, "SparseFillEmptyRows", [sp_input]): default_value = ops.convert_to_tensor( default_value, dtype=sp_input.values.dtype) (output_indices, output_values, empty_row_indicator, unused_reverse_index_map) = gen_sparse_ops._sparse_fill_empty_rows( indices=sp_input.indices, values=sp_input.values, dense_shape=sp_input.dense_shape, default_value=default_value) return (sparse_tensor.SparseTensor(indices=output_indices, values=output_values, dense_shape=sp_input.dense_shape), empty_row_indicator) def serialize_sparse(sp_input, name=None, out_type=dtypes.string): """Serialize a `SparseTensor` into a 3-vector (1-D `Tensor`) object. Args: sp_input: The input `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A 3-vector (1-D `Tensor`), with each column representing the serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops._serialize_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type) def serialize_many_sparse(sp_input, name=None, out_type=dtypes.string): """Serialize `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor`. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. name: A name prefix for the returned tensors (optional). out_type: The `dtype` to use for serialization. Returns: A matrix (2-D `Tensor`) with `N` rows and `3` columns. 
Each column represents serialized `SparseTensor`'s indices, values, and shape (respectively). Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops._serialize_many_sparse( sp_input.indices, sp_input.values, sp_input.dense_shape, name=name, out_type=out_type) def deserialize_sparse(serialized_sparse, dtype, rank=None, name=None): """Deserialize `SparseTensor` objects. The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where the last dimension stores serialized `SparseTensor` objects and the other N dimensions (N >= 0) correspond to a batch. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, its rank is the rank of the incoming `SparseTensor` objects plus N; the sparse tensors have been concatenated along new dimensions, one for each batch. The output `SparseTensor` object's shape values for the original dimensions are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. The new dimensions match the size of the batch. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `SparseReorder` to restore index ordering. For example, if the serialized input is a `[2 x 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: The serialized `SparseTensor` objects. The last dimension must have 3 columns. dtype: The `dtype` of the serialized `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional). 
Returns: A `SparseTensor` representing the deserialized `SparseTensor` objects. """ output_indices, output_values, output_shape = ( gen_sparse_ops._deserialize_sparse(serialized_sparse, dtype, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None): """Deserialize and concatenate `SparseTensors` from a serialized minibatch. The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where `N` is the minibatch size and the rows correspond to packed outputs of `serialize_sparse`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse_reorder` to restore index ordering. For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`. The serialized and packed `SparseTensor` objects. dtype: The `dtype` of the serialized `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. 
name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type. """ output_indices, output_values, output_shape = ( gen_sparse_ops._deserialize_many_sparse( serialized_sparse, dtype, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape) def sparse_tensor_dense_matmul(sp_a, b, adjoint_a=False, adjoint_b=False, name=None): # pylint: disable=line-too-long """Multiply SparseTensor (of rank 2) "A" by dense matrix "B". No validity checking is performed on the indices of `A`. However, the following input format is recommended for optimal behavior: * If `adjoint_a == false`: `A` should be sorted in lexicographically increasing order. Use `sparse_reorder` if you're not sure. * If `adjoint_a == true`: `A` should be sorted in order of increasing dimension 1 (i.e., "column major" order instead of "row major" order). Using `tf.nn.embedding_lookup_sparse` for sparse multiplication: It's not obvious but you can consider `embedding_lookup_sparse` as another sparse and dense multiplication. In some situations, you may prefer to use `embedding_lookup_sparse` even though you're not dealing with embeddings. There are two questions to ask in the decision process: Do you need gradients computed as sparse too? Is your sparse data represented as two `SparseTensor`s: ids and values? There is more explanation about data format below. If you answer any of these questions as yes, consider using `tf.nn.embedding_lookup_sparse`. 
def sparse_tensor_dense_matmul(sp_a, b, adjoint_a=False, adjoint_b=False,
                               name=None):
  # pylint: disable=line-too-long
  """Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of `A`.  However, the
  following input format is recommended for optimal behavior:

  * If `adjoint_a == false`: `A` should be sorted in lexicographically
    increasing order.  Use `sparse_reorder` if you're not sure.
  * If `adjoint_a == true`: `A` should be sorted in order of increasing
    dimension 1 (i.e., "column major" order instead of "row major" order).

  Using `tf.nn.embedding_lookup_sparse` for sparse multiplication:

  It's not obvious but you can consider `embedding_lookup_sparse` as another
  sparse and dense multiplication. In some situations, you may prefer to use
  `embedding_lookup_sparse` even though you're not dealing with embeddings.

  There are two questions to ask in the decision process: Do you need
  gradients computed as sparse too? Is your sparse data represented as two
  `SparseTensor`s: ids and values? There is more explanation about data
  format below. If you answer any of these questions as yes, consider using
  `tf.nn.embedding_lookup_sparse`.

  Following explains differences between the expected SparseTensors:

  For example if dense form of your sparse data has shape `[3, 5]` and values:

      [[  a      ]
       [b       c]
       [    d    ]]


  `SparseTensor` format expected by `sparse_tensor_dense_matmul`:
   `sp_a` (indices, values):

      [0, 1]: a
      [1, 0]: b
      [1, 4]: c
      [2, 2]: d

  `SparseTensor` format expected by `embedding_lookup_sparse`:
   `sp_ids`                 `sp_weights`

      [0, 0]: 1                [0, 0]: a
      [1, 0]: 0                [1, 0]: b
      [1, 1]: 4                [1, 1]: c
      [2, 0]: 2                [2, 0]: d


  Deciding when to use `sparse_tensor_dense_matmul` vs.
  `matmul`(a_is_sparse=True):

  There are a number of questions to ask in the decision process, including:

  * Will the SparseTensor `A` fit in memory if densified?
  * Is the column count of the product large (>> 1)?
  * Is the density of `A` larger than approximately 15%?

  If the answer to several of these questions is yes, consider converting
  the `SparseTensor` to a dense one and using `tf.matmul` with
  `a_is_sparse=True`.

  This operation tends to perform well when `A` is more sparse, if the
  column size of the product is small (e.g. matrix-vector multiplication),
  and if `sp_a.dense_shape` takes on large values.  Benchmarks (reproducible
  with `tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks`,
  CPU: Intel Ivybridge, GPU: NVidia Tesla k40c, compiled with
  `-c opt --config=cuda --copt=-mavx`) show this op is typically several
  times faster than the dense path at ~1% density, roughly break-even
  around 20-50% density with moderate column counts, and up to ~2-4x
  *slower* at high density (>= 50%) with many product columns (n >= 25).
  The comparison excludes the cost of densifying `A`, so it is conservative
  in favor of the dense path.

  Args:
    sp_a: SparseTensor A, of rank 2.
    b: A dense Matrix with the same dtype as sp_a.
    adjoint_a: Use the adjoint of A in the matrix multiply.  If A is complex,
      this is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: Use the adjoint of B in the matrix multiply.  If B is complex,
      this is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name prefix for the returned tensors (optional)

  Returns:
    A dense matrix (pseudo-code in dense np.matrix notation):
      `A = A.H if adjoint_a else A`
      `B = B.H if adjoint_b else B`
      `return A*B`
  """
  # pylint: enable=line-too-long
  sp_a = _convert_to_sparse_tensor(sp_a)
  with ops.name_scope(name, "SparseTensorDenseMatMul",
                      [sp_a.indices, sp_a.values, b]) as name:
    b = ops.convert_to_tensor(b, name="b")
    return gen_sparse_ops._sparse_tensor_dense_mat_mul(
        a_indices=sp_a.indices,
        a_values=sp_a.values,
        a_shape=sp_a.dense_shape,
        b=b,
        adjoint_a=adjoint_a,
        adjoint_b=adjoint_b)
""" with ops.name_scope(name, "SparseSoftmax", [sp_input.indices, sp_input.values]) as name: out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values, sp_input.dense_shape) return sparse_tensor.SparseTensor( sp_input.indices, out_vals, sp_input.dense_shape) def sparse_maximum(sp_a, sp_b, name=None): """Returns the element-wise max of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: ```python sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7]) sp_one = sparse_tensor.SparseTensor([[1]], [1], [7]) res = tf.sparse_maximum(sp_zero, sp_one).eval() # "res" should be equal to SparseTensor([[0], [1]], [0, 1], [7]). ``` Args: sp_a: a `SparseTensor` operand whose dtype is real, and indices lexicographically ordered. sp_b: the other `SparseTensor` operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor. """ with ops.name_scope(name, "SparseSparseMaximum", [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name: out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum( sp_a.indices, sp_a.values, sp_a.dense_shape, sp_b.indices, sp_b.values, sp_b.dense_shape, name=name) return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape) def sparse_minimum(sp_a, sp_b, name=None): """Returns the element-wise min of two SparseTensors. Assumes the two SparseTensors have the same shape, i.e., no broadcasting. Example: ```python sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7]) sp_one = sparse_tensor.SparseTensor([[1]], [1], [7]) res = tf.sparse_minimum(sp_zero, sp_one).eval() # "res" should be equal to SparseTensor([[0], [1]], [0, 0], [7]). ``` Args: sp_a: a `SparseTensor` operand whose dtype is real, and indices lexicographically ordered. sp_b: the other `SparseTensor` operand with the same requirements (and the same shape). name: optional name of the operation. Returns: output: the output SparseTensor. 
""" with ops.name_scope(name, "SparseSparseMinimum", [sp_a.indices, sp_a.values, sp_b.indices, sp_b.values]) as name: out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum( sp_a.indices, sp_a.values, sp_a.dense_shape, sp_b.indices, sp_b.values, sp_b.dense_shape, name=name) return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape) def sparse_transpose(sp_input, perm=None, name=None): """Transposes a `SparseTensor` The returned tensor's dimension i will correspond to the input dimension `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank of the input tensor. Hence by default, this operation performs a regular matrix transpose on 2-D input Tensors. For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`: [0, 3]: b [0, 1]: a [3, 1]: d [2, 0]: c then the output will be a `SparseTensor` of shape `[5, 4]` and `indices` / `values`: [0, 2]: c [1, 0]: a [1, 3]: d [3, 0]: b Args: sp_input: The input `SparseTensor`. perm: A permutation of the dimensions of `sp_input`. name: A name prefix for the returned tensors (optional) Returns: A transposed `SparseTensor`. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ with ops.name_scope(name, "SparseTranspose", [sp_input]) as name: if perm is None: rank = array_ops.rank(sp_input) perm = (rank - 1) - math_ops.range(0, rank, 1) indices = sp_input.indices transposed_indices = array_ops.transpose( array_ops.gather(array_ops.transpose(indices), perm)) perm_ = tensor_util.constant_value(ops.convert_to_tensor(perm)) if perm_ is not None and sp_input.get_shape().is_fully_defined(): old_shape_ = sp_input.get_shape().as_list() transposed_dense_shape = list(old_shape_) # Copy. 
for i, p in enumerate(perm_): transposed_dense_shape[i] = old_shape_[p] else: dense_shape = sp_input.dense_shape transposed_dense_shape = array_ops.gather(dense_shape, perm) transposed_st = sparse_tensor.SparseTensor( transposed_indices, sp_input.values, transposed_dense_shape) transposed_st = sparse_reorder(transposed_st) return transposed_st def _add_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None): """Add a `SparseTensor` to a `SparseTensorsMap` and return its handle. Args: sp_input: The input `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string 1-vector (1D `Tensor`), with the single element representing the a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops._add_sparse_to_tensors_map( sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name) def _add_many_sparse_to_tensors_map(sp_input, container=None, shared_name=None, name=None): """Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. The `SparseTensor` must have rank `R` greater than 1, and the first dimension is treated as the minibatch dimension. Elements of the `SparseTensor` must be sorted in increasing order of this first dimension. The serialized `SparseTensor` objects going into each row of the output `Tensor` will have rank `R-1`. The minibatch size `N` is extracted from `sparse_shape[0]`. Args: sp_input: The input rank `R` `SparseTensor`. container: The container for the underlying `SparseTensorsMap` (optional). 
shared_name: The shared name for the underlying `SparseTensorsMap` (optional, defaults to the name of the newly created op). name: A name prefix for the returned tensors (optional). Returns: A string matrix (2-D `Tensor`) with `N` rows and `1` column. Each row represents a unique handle to a `SparseTensor` stored by the `SparseTensorMap` underlying this op. Raises: TypeError: If `sp_input` is not a `SparseTensor`. """ sp_input = _convert_to_sparse_tensor(sp_input) return gen_sparse_ops._add_many_sparse_to_tensors_map( sp_input.indices, sp_input.values, sp_input.dense_shape, container=container, shared_name=shared_name, name=name) def _take_many_sparse_from_tensors_map( sparse_map_op, sparse_handles, rank=None, name=None): """Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. The input `sparse_handles` must be a string matrix of shape `[N, 1]` where `N` is the minibatch size and the rows correspond to packed outputs of `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects must all match. When the final `SparseTensor` is created, it has rank one higher than the ranks of the incoming `SparseTensor` objects (they have been concatenated along a new row dimension). The output `SparseTensor` object's shape values for all dimensions but the first are the max across the input `SparseTensor` objects' shape values for the corresponding dimensions. Its first shape value is `N`, the minibatch size. The input `SparseTensor` objects' indices are assumed ordered in standard lexicographic order. If this is not the case, after this step run `sparse_reorder` to restore index ordering. 
For example, if the serialized input is a `[2, 3]` matrix representing two original `SparseTensor` objects: index = [ 0] [10] [20] values = [1, 2, 3] shape = [50] and index = [ 2] [10] values = [4, 5] shape = [30] then the final deserialized `SparseTensor` will be: index = [0 0] [0 10] [0 20] [1 2] [1 10] values = [1, 2, 3, 4, 5] shape = [2 50] Args: sparse_map_op: The `Operation` that created the original handles. Usually this is, e.g., `add_sparse_to_tensors_map(...).op`. sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`. The serialized and packed `SparseTensor` objects. rank: (optional) Python int, the rank of the `SparseTensor` objects. name: A name prefix for the returned tensors (optional) Returns: A `SparseTensor` representing the deserialized `SparseTensor`s, concatenated along the `SparseTensor`s' first dimension. All of the serialized `SparseTensor`s must have had the same rank and type. """ if not isinstance(sparse_map_op, ops.Operation): raise TypeError("sparse_map_op be an Operation") if sparse_map_op.type not in ("AddSparseToTensorsMap", "AddManySparseToTensorsMap"): raise TypeError("sparse_map_op must be one of AddSparseToTensorsMap or " "AddSparseToTensorsMap. Instead, found `%s`." % sparse_map_op.type) with ops.colocate_with(sparse_map_op): shared_name = sparse_map_op.get_attr("shared_name") or sparse_map_op.name output_indices, output_values, output_shape = ( gen_sparse_ops._take_many_sparse_from_tensors_map( sparse_handles, dtype=sparse_map_op.get_attr("T"), container=sparse_map_op.get_attr("container"), shared_name=shared_name, name=name)) # Feed rank data back in, if available output_indices.set_shape([None, rank]) output_shape.set_shape([rank]) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)
apache-2.0
lewislone/mStocks
packets-analysis/lib/XlsxWriter-0.7.3/examples/macros.py
9
1042
####################################################################### # # An example of adding macros to an XlsxWriter file using a VBA project # file extracted from an existing Excel xlsm file. # # The vba_extract.py utility supplied with XlsxWriter can be used to extract # the vbaProject.bin file. # # An embedded macro is connected to a form button on the worksheet. # # Copyright 2013-2015, John McNamara, jmcnamara@cpan.org # import xlsxwriter # Note the file extension should be .xlsm. workbook = xlsxwriter.Workbook('macros.xlsm') worksheet = workbook.add_worksheet() worksheet.set_column('A:A', 30) # Add the VBA project binary. workbook.add_vba_project('./vbaProject.bin') # Show text for the end user. worksheet.write('A3', 'Press the button to say hello.') # Add a button tied to a macro in the VBA project. worksheet.insert_button('B3', {'macro': 'say_hello', 'caption': 'Press Me', 'width': 80, 'height': 30}) workbook.close()
mit
moble/sympy
sympy/parsing/ast_parser.py
122
2811
""" This module implements the functionality to take any Python expression as a string and fix all numbers and other things before evaluating it, thus 1/2 returns Integer(1)/Integer(2) We use the Python ast module for that, which is in python2.6 and later. It is well documented at docs.python.org. Some tips to understand how this works: use dump() to get a nice representation of any node. Then write a string of what you want to get, e.g. "Integer(1)", parse it, dump it and you'll see that you need to do "Call(Name('Integer', Load()), [node], [], None, None)". You don't need to bother with lineno and col_offset, just call fix_missing_locations() before returning the node. """ from __future__ import print_function, division from sympy.core.basic import Basic from sympy.core.compatibility import exec_ from sympy.core.sympify import SympifyError from ast import parse, NodeTransformer, Call, Name, Load, \ fix_missing_locations, Str, Tuple class Transform(NodeTransformer): def __init__(self, local_dict, global_dict): NodeTransformer.__init__(self) self.local_dict = local_dict self.global_dict = global_dict def visit_Num(self, node): if isinstance(node.n, int): return fix_missing_locations(Call(Name('Integer', Load()), [node], [], None, None)) elif isinstance(node.n, float): return fix_missing_locations(Call(Name('Float', Load()), [node], [], None, None)) return node def visit_Name(self, node): if node.id in self.local_dict: return node elif node.id in self.global_dict: name_obj = self.global_dict[node.id] if isinstance(name_obj, (Basic, type)) or callable(name_obj): return node elif node.id in ['True', 'False']: return node return fix_missing_locations(Call(Name('Symbol', Load()), [Str(node.id)], [], None, None)) def visit_Lambda(self, node): args = [self.visit(arg) for arg in node.args.args] body = self.visit(node.body) n = Call(Name('Lambda', Load()), [Tuple(args, Load()), body], [], None, None) return fix_missing_locations(n) def parse_expr(s, local_dict): """ 
Converts the string "s" to a SymPy expression, in local_dict. It converts all numbers to Integers before feeding it to Python and automatically creates Symbols. """ global_dict = {} exec_('from sympy import *', global_dict) try: a = parse(s.strip(), mode="eval") except SyntaxError: raise SympifyError("Cannot parse %s." % repr(s)) a = Transform(local_dict, global_dict).visit(a) e = compile(a, "<string>", "eval") return eval(e, global_dict, local_dict)
bsd-3-clause
csicar/blynk-library
tests/pseudo-server-dw-mt.py
20
5266
#!/usr/bin/python ''' This is a pseudo-server that sends predefined pattern to any connected client. It is used to test transport behaviour and throughput. If you want to use it with a sketch, connect your PC and Blynk-enabled device into the same network and configure Blynk to connect to this pseudo-server: IPAddress serv(192,168,0,105); // IP address of your PC Blynk.begin(auth, serv, 8888); Author: Volodymyr Shymanskyy License: The MIT license ''' import select, socket, struct import os, sys, time, getopt from threading import Thread # Configuration options # Parse command line options try: opts, args = getopt.getopt(sys.argv[1:], "hb:p:", ["help", "bind=", "port=", "sndbuf=", "rcvbuf=", "nodelay", "sleep=", "qty=", "freq=", "pin=", "dump"]) except getopt.GetoptError: print >>sys.stderr, __doc__ sys.exit(2) # Default options HOST = '' # Bind to all interfaces PORT = 8888 # Bind to port 8888 NODELAY = 0 # No TCP_NODELAY SNDBUF = 0 # No SNDBUF override RCVBUF = 0 # No RCVBUF override MSG_QTY = 10 # Amount of messages SLEEP = 1.0 # Wait some time between IO HW_PIN = 14 # Pin # DUMP = 0 for o, v in opts: if o in ("-h", "--help"): print __doc__ sys.exit() elif o in ("-b", "--bind"): HOST = v elif o in ("-p", "--port"): PORT = int(v) elif o in ("--sndbuf",): SNDBUF = int(v) elif o in ("--rcvbuf",): RCVBUF = int(v) elif o in ("--nodelay",): NODELAY = 1 elif o in ("--sleep",): SLEEP = float(v) elif o in ("--freq",): SLEEP = 1.0/float(v) elif o in ("--qty",): MSG_QTY = int(v) elif o in ("--pin",): HW_PIN = int(v) elif o in ("--dump",): DUMP = 1 # Blynk protocol helpers hdr = struct.Struct("!BHH") class MsgType: RSP = 0 LOGIN = 2 PING = 6 HW = 20 class MsgStatus: OK = 200 def hw(*args): # Convert params to string and join using \0 data = "\0".join(map(str, args)) dump("< " + " ".join(map(str, args))) # Prepend HW command header return hdr.pack(MsgType.HW, 1, len(data)) + data # Print utilities start_time = time.time() def log(msg): print "[{:7.3f}] 
{:}".format(float(time.time() - start_time), msg) draw_col = 0 def draw(c): global draw_col if not DUMP: sys.stdout.write(c) draw_col = (draw_col + 1) % 120 if draw_col: sys.stdout.flush() else: sys.stdout.write("\n") def dump(msg): if DUMP: log(msg) def receive(sock, length): d = [] l = 0 while l < length: r = sock.recv(length-l) if not r: return '' d.append(r) l += len(r) return ''.join(d) # Threads def readthread(conn, addr): global msgs_in, authenticated while(msgs_in < MSG_QTY): data = receive(conn, hdr.size) if not data: break msg_type, msg_id, msg_len = hdr.unpack(data) #dump("Got {0}, {1}, {2}".format(msg_type, msg_id, msg_len)) if msg_type == MsgType.RSP: pass elif msg_type == MsgType.LOGIN: auth = receive(conn, msg_len) log("Auth {0}".format(auth)) # Send auth OK and pin modes conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK)) conn.sendall(hw("pm", HW_PIN, "out")) authenticated = True elif msg_type == MsgType.PING: log("Ping") # Send Pong conn.sendall(hdr.pack(MsgType.RSP, msg_id, MsgStatus.OK)) elif msg_type == MsgType.HW: data = receive(conn, msg_len) # Print HW messages (just for fun :) draw('v') dump("> " + " ".join(data.split("\0"))) msgs_in += 1 else: log("Unknown msg type") break def writethread(conn, addr): global msgs_out, authenticated val = 0 while (msgs_out < MSG_QTY): if authenticated: conn.sendall(hw("dw", HW_PIN, val)) val = 0 if val else 1 draw('.') msgs_out += 1 time.sleep(SLEEP) # Main code serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: # Set SO_REUSEADDR, this is needed to ignore WAIT state on next run serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) serv.bind((HOST, PORT)) except socket.error as msg: log('Bind failed. 
Error Code: {0}, Msg: {1}'.format(str(msg[0]), msg[1])) sys.exit() serv.listen(1) log('Listening on port %d' % PORT) # Wait for clients #while True: conn, addr = serv.accept() log('Connection from {0}:{1}'.format(addr[0], str(addr[1]))) if NODELAY != 0: conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if SNDBUF != 0: sndbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF) log('Default SNDBUF %s changed to %s' % (sndbuf, SNDBUF)) conn.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF) if RCVBUF != 0: rcvbuf = conn.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) log('Default RCVBUF %s changed to %s' % (rcvbuf, RCVBUF)) conn.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF) proc_start = time.time() msgs_in = 0 msgs_out = 0 authenticated = False wt = Thread(target=readthread, args=(conn, addr)) rt = Thread(target=writethread, args=(conn, addr)) wt.start() rt.start() wt.join() #rt.join() conn.close() draw("\n") log("Time %3.4f" % (time.time() - proc_start)) log("Sent {0} messages".format(msgs_out)) log("Recv {0} messages".format(msgs_in))
mit
enochd/RMG-Java
scripts/distGeomScriptMolLowestEnergyConf.py
9
1449
# gmagoon 7/22/09: writes the lowest energy conformation for mole file in # arg#1 to mole file in arg#2 (and corresponding crude mole file in arg#3), based on UFF energy of arg#4 embeddings # (optimized); should be reproducible due to use of consistent randomSeed #updated 8/12/09 for Q2 2009 version of RDKit (rdkit "import" lines of script) #arg#5 should contain the absolute path of the RDBASE environment variable import sys sys.path.insert(1, sys.argv[5])#add $RDBASE to the PYTHONPATH so that import statements below work properly from rdkit import Chem from rdkit.Chem import AllChem attempts=int(sys.argv[4]) m = Chem.MolFromMolFile(sys.argv[1], removeHs=False) #m2=Chem.AddHs(m) AllChem.EmbedMultipleConfs(m, attempts,randomSeed=1) m2crude = Chem.Mol(m.ToBinary()) #make a copy of the (crude) coordinates via ToBinary energy=0.0 minEid=0; lowestE=9.999999e99;#start with a very high number, which would never be reached for i in range(m.GetNumConformers()): AllChem.UFFOptimizeMolecule(m,confId=i) energy=AllChem.UFFGetMoleculeForceField(m,confId=i).CalcEnergy() if (energy < lowestE): minEid = i lowestE = energy #energy.append(AllChem.UFFGetMoleculeForceField(m,confId=i).CalcEnergy()) f=open(sys.argv[2], 'w') print >>f,Chem.MolToMolBlock(m,confId=minEid) f.close() f=open(sys.argv[3], 'w')#write crude coordinates print >>f,Chem.MolToMolBlock(m2crude,confId=minEid) f.close()
mit
blokweb/androguard
tests/test_dex.py
37
2862
#!/usr/bin/env python import logging import datetime import sys PATH_INSTALL = "./" sys.path.append(PATH_INSTALL) from optparse import OptionParser from androguard.core.analysis import auto from androguard.core.androconf import set_debug option_0 = {'name': ('-d', '--directory'), 'help': 'directory input', 'nargs': 1} option_1 = {'name': ('-v', '--verbose'), 'help': 'add debug', 'action': 'count'} options = [option_0, option_1] logger = logging.getLogger("main") console_handler = logging.StreamHandler() console_handler.setFormatter(logging.Formatter("%(message)s")) logger.addHandler(console_handler) logger.setLevel(logging.INFO) def test(got, expected): if got == expected: prefix = ' OK ' else: prefix = ' X ' print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected)), return (got == expected) class AndroLog: def __init__(self, id_file, filename): self.id_file = id_file self.filename = filename def dump(self, msg): now = datetime.datetime.now() str_date = now.strftime("%Y-%m-%d %H:%M:%S ") logger.info(str_date + "%s[%d]: %s" % (self.filename, self.id_file, msg)) def error(self, msg): now = datetime.datetime.now() str_date = now.strftime("%Y-%m-%d %H:%M:%S ") logger.info(str_date + "ERROR %s[%d]: %s" % (self.filename, self.id_file, msg)) import traceback traceback.print_exc() class MyDEXAnalysis(auto.DirectoryAndroAnalysis): def __init__(self, directory): super(MyDEXAnalysis, self).__init__(directory) def filter_file(self, log, fileraw): ret, file_type = super(MyDEXAnalysis, self).filter_file(log, fileraw) if file_type != "APK" and file_type != "DEX" and file_type != "DEY": return (False, None) return (ret, file_type) def analysis_dex(self, log, dex): log.dump("%s" % str(dex)) for method in dex.get_methods(): idx = 0 for i in method.get_instructions(): i.get_name(), i.show_buff(idx) idx += i.get_length() return False def analysis_dey(self, log, dey): log.dump("%s" % str(dey)) for method in dey.get_methods(): idx = 0 for i in method.get_instructions(): 
i.get_name(), i.show_buff(idx) idx += i.get_length() return False def crash(self, log, why): log.error(why) def main(options, arguments): if options.verbose: set_debug() if options.directory: settings = { "my": MyDEXAnalysis(options.directory), "log": AndroLog, "max_fetcher": 3, } aa = auto.AndroAuto(settings) aa.go() if __name__ == "__main__": parser = OptionParser() for option in options: param = option['name'] del option['name'] parser.add_option(*param, **option) options, arguments = parser.parse_args() sys.argv[:] = arguments main(options, arguments)
apache-2.0
pbrod/scipy
scipy/linalg/tests/test_basic.py
2
61854
# # Created by: Pearu Peterson, March 2002 # """ Test functions for linalg.basic module """ from __future__ import division, print_function, absolute_import import warnings import itertools import numpy as np from numpy import (arange, array, dot, zeros, identity, conjugate, transpose, float32) import numpy.linalg as linalg from numpy.random import random from numpy.testing import (TestCase, run_module_suite, assert_raises, assert_equal, assert_almost_equal, assert_, assert_array_almost_equal, assert_allclose, assert_array_equal, dec) from scipy.linalg import (solve, inv, det, lstsq, pinv, pinv2, pinvh, norm, solve_banded, solveh_banded, solve_triangular, solve_circulant, circulant, LinAlgError, block_diag, matrix_balance) from scipy.linalg.basic import LstsqLapackError from scipy.linalg._testutils import assert_no_overwrite from scipy._lib._version import NumpyVersion """ Bugs: 1) solve.check_random_sym_complex fails if a is complex and transpose(a) = conjugate(a) (a is Hermitian). """ __usage__ = """ Build linalg: python setup_linalg.py build Run tests if scipy is installed: python -c 'import scipy;scipy.linalg.test()' Run tests if linalg is not installed: python tests/test_basic.py """ REAL_DTYPES = [np.float32, np.float64, np.longdouble] COMPLEX_DTYPES = [np.complex64, np.complex128, np.clongdouble] DTYPES = REAL_DTYPES + COMPLEX_DTYPES def _eps_cast(dtyp): """Get the epsilon for dtype, possibly downcast to BLAS types.""" dt = dtyp if dt == np.longdouble: dt = np.float64 elif dt == np.clongdouble: dt = np.complex128 return np.finfo(dt).eps class TestSolveBanded(TestCase): def test_real(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, 
b4by4]: x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) def test_complex(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2j, 1, 20, 2j], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2j], [1, 4, 20, 14], [-30, 1, 7, 0], [2j, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0j]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1j], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) def test_tridiag_real(self): ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0]]) a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( ab[2, :-1], -1) b4 = array([10.0, 0.0, 2.0, 14.0]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((1, 1), ab, b) assert_array_almost_equal(dot(a, x), b) def test_tridiag_complex(self): ab = array([[0.0, 20, 6, 2j], [1, 4, 20, 14], [-30, 1, 7, 0]]) a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( ab[2, :-1], -1) b4 = array([10.0, 0.0, 2.0, 14.0j]) b4by1 = b4.reshape(-1, 1) b4by2 = array([[2, 1], [-30, 4], [2, 3], [1, 3]]) b4by4 = array([[1, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 0]]) for b in [b4, b4by1, b4by2, b4by4]: x = solve_banded((1, 1), ab, b) assert_array_almost_equal(dot(a, x), b) def test_check_finite(self): a = array([[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]]) ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 b4 = array([10.0, 0.0, 2.0, 14.0]) x = solve_banded((l, u), ab, b4, check_finite=False) assert_array_almost_equal(dot(a, x), b4) def test_bad_shape(self): ab = array([[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]]) l, u = 2, 1 bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4) assert_raises(ValueError, 
solve_banded, (l, u), ab, bad) assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0]) # Values of (l,u) are not compatible with ab. assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0]) def test_1x1(self): b = array([[1., 2., 3.]]) x = solve_banded((1, 1), [[0], [2], [0]], b) assert_array_equal(x, [[0.5, 1.0, 1.5]]) assert_equal(x.dtype, np.dtype('f8')) assert_array_equal(b, [[1.0, 2.0, 3.0]]) def test_native_list_arguments(self): a = [[1.0, 20, 0, 0], [-30, 4, 6, 0], [2, 1, 20, 2], [0, -1, 7, 14]] ab = [[0.0, 20, 6, 2], [1, 4, 20, 14], [-30, 1, 7, 0], [2, -1, 0, 0]] l, u = 2, 1 b = [10.0, 0.0, 2.0, 14.0] x = solve_banded((l, u), ab, b) assert_array_almost_equal(dot(a, x), b) class TestSolveHBanded(TestCase): def test_01_upper(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # with the RHS as a 1D array. ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0, 2.0]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_upper(self): # Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_03_upper(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # with the RHS as a 2D array with shape (3,1). 
ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1) x = solveh_banded(ab, b) assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1)) def test_01_lower(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, -99], [2.0, 2.0, 0.0, 0.0]]) b = array([1.0, 4.0, 1.0, 2.0]) x = solveh_banded(ab, b, lower=True) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_lower(self): # Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[4.0, 4.0, 4.0, 4.0], [1.0, 1.0, 1.0, -99], [2.0, 2.0, 0.0, 0.0]]) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]]) x = solveh_banded(ab, b, lower=True) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_01_float32(self): # Solve # [ 4 1 2 0] [1] # [ 1 4 1 2] X = [4] # [ 2 1 4 1] [1] # [ 0 2 1 4] [2] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]], dtype=float32) b = array([1.0, 4.0, 1.0, 2.0], dtype=float32) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) def test_02_float32(self): # Solve # [ 4 1 2 0] [1 6] # [ 1 4 1 2] X = [4 2] # [ 2 1 4 1] [1 6] # [ 0 2 1 4] [2 1] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]], dtype=float32) b = array([[1.0, 6.0], [4.0, 2.0], [1.0, 6.0], [2.0, 1.0]], dtype=float32) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_01_complex(self): # Solve # [ 4 -j 2 0] [2-j] # [ j 4 -j 2] X = [4-j] # [ 2 j 4 -j] [4+j] # [ 0 2 j 4] [2+j] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, -1.0j, -1.0j, -1.0j], [4.0, 4.0, 4.0, 4.0]]) b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0]) def 
test_02_complex(self): # Solve # [ 4 -j 2 0] [2-j 2+4j] # [ j 4 -j 2] X = [4-j -1-j] # [ 2 j 4 -j] [4+j 4+2j] # [ 0 2 j 4] [2+j j] # ab = array([[0.0, 0.0, 2.0, 2.0], [-99, -1.0j, -1.0j, -1.0j], [4.0, 4.0, 4.0, 4.0]]) b = array([[2-1j, 2+4j], [4.0-1j, -1-1j], [4.0+1j, 4+2j], [2+1j, 1j]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0j], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_upper(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 1D array. ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_upper(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_03_upper(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 2D array with shape (3,1). 
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]).reshape(-1, 1) x = solveh_banded(ab, b) assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1)) def test_tridiag_01_lower(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # ab = array([[4.0, 4.0, 4.0], [1.0, 1.0, -99]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b, lower=True) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_lower(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[4.0, 4.0, 4.0], [1.0, 1.0, -99]]) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]]) x = solveh_banded(ab, b, lower=True) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_float32(self): # Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) b = array([1.0, 4.0, 1.0], dtype=float32) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_tridiag_02_float32(self): # Solve # [ 4 1 0] [1 4] # [ 1 4 1] X = [4 2] # [ 0 1 4] [1 4] # ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) b = array([[1.0, 4.0], [4.0, 2.0], [1.0, 4.0]], dtype=float32) x = solveh_banded(ab, b) expected = array([[0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) assert_array_almost_equal(x, expected) def test_tridiag_01_complex(self): # Solve # [ 4 -j 0] [ -j] # [ j 4 -j] X = [4-j] # [ 0 j 4] [4+j] # ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) b = array([-1.0j, 4.0-1j, 4+1j]) x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 1.0]) def test_tridiag_02_complex(self): # Solve # [ 4 -j 0] [ -j 4j] # [ j 4 -j] X = [4-j -1-j] # [ 0 j 4] [4+j 4 ] # ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) b = array([[-1j, 4.0j], [4.0-1j, -1.0-1j], [4.0+1j, 4.0]]) x = solveh_banded(ab, b) expected = array([[0.0, 1.0j], [1.0, 0.0], [1.0, 1.0]]) assert_array_almost_equal(x, expected) def test_check_finite(self): # 
Solve # [ 4 1 0] [1] # [ 1 4 1] X = [4] # [ 0 1 4] [1] # with the RHS as a 1D array. ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([1.0, 4.0, 1.0]) x = solveh_banded(ab, b, check_finite=False) assert_array_almost_equal(x, [0.0, 1.0, 0.0]) def test_bad_shapes(self): ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) b = array([[1.0, 4.0], [4.0, 2.0]]) assert_raises(ValueError, solveh_banded, ab, b) assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0]) assert_raises(ValueError, solveh_banded, ab, [1.0]) def test_1x1(self): x = solveh_banded([[1]], [[1, 2, 3]]) assert_array_equal(x, [[1.0, 2.0, 3.0]]) assert_equal(x.dtype, np.dtype('f8')) def test_native_list_arguments(self): # Same as test_01_upper, using python's native list. ab = [[0.0, 0.0, 2.0, 2.0], [-99, 1.0, 1.0, 1.0], [4.0, 4.0, 4.0, 4.0]] b = [1.0, 4.0, 1.0, 2.0] x = solveh_banded(ab, b) assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) class TestSolve(TestCase): def setUp(self): np.random.seed(1234) def test_20Feb04_bug(self): a = [[1, 1], [1.0, 0]] # ok x0 = solve(a, [1, 0j]) assert_array_almost_equal(dot(a, x0), [1, 0]) # gives failure with clapack.zgesv(..,rowmajor=0) a = [[1, 1], [1.2, 0]] b = [1, 0j] x0 = solve(a, b) assert_array_almost_equal(dot(a, x0), [1, 0]) def test_simple(self): a = [[1, 20], [-30, 4]] for b in ([[1, 0], [0, 1]], [1, 0], [[2, 1], [-30, 4]]): x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_simple_sym(self): a = [[2, 3], [3, 5]] for lower in [0, 1]: for b in ([[1, 0], [0, 1]], [1, 0]): x = solve(a, b, sym_pos=1, lower=lower) assert_array_almost_equal(dot(a, x), b) def test_simple_sym_complex(self): a = [[5, 2], [2, 4]] for b in [[1j, 0], [[1j, 1j], [0, 2]], ]: x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_simple_complex(self): a = array([[5, 2], [2j, 4]], 'D') for b in [[1j, 0], [[1j, 1j], [0, 2]], [1, 0j], array([1, 0], 'D'), ]: x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_nils_20Feb04(self): n = 2 A = 
random([n, n])+random([n, n])*1j X = zeros((n, n), 'D') Ainv = inv(A) R = identity(n)+identity(n)*0j for i in arange(0, n): r = R[:, i] X[:, i] = solve(A, r) assert_array_almost_equal(X, Ainv) def test_random(self): n = 20 a = random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) for i in range(4): b = random([n, 3]) x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_random_complex(self): n = 20 a = random([n, n]) + 1j * random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) for i in range(2): b = random([n, 3]) x = solve(a, b) assert_array_almost_equal(dot(a, x), b) def test_random_sym(self): n = 20 a = random([n, n]) for i in range(n): a[i, i] = abs(20*(.1+a[i, i])) for j in range(i): a[i, j] = a[j, i] for i in range(4): b = random([n]) x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_random_sym_complex(self): n = 20 a = random([n, n]) # XXX: with the following addition the accuracy will be very low a = a + 1j*random([n, n]) for i in range(n): a[i, i] = abs(20*(.1+a[i, i])) for j in range(i): a[i, j] = conjugate(a[j, i]) b = random([n])+2j*random([n]) for i in range(2): x = solve(a, b, sym_pos=1) assert_array_almost_equal(dot(a, x), b) def test_check_finite(self): a = [[1, 20], [-30, 4]] for b in ([[1, 0], [0, 1]], [1, 0], [[2, 1], [-30, 4]]): x = solve(a, b, check_finite=False) assert_array_almost_equal(dot(a, x), b) def test_scalar_a_and_1D_b(self): a = 1 b = [1, 2, 3] x = solve(a, b) assert_array_almost_equal(x.ravel(), b) assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape') def test_simple2(self): a = np.array([[1.80, 2.88, 2.05, -0.89], [525.00, -295.00, -95.00, -380.00], [1.58, -2.69, -2.90, -1.04], [-1.11, -0.66, -0.59, 0.80]]) b = np.array([[9.52, 18.47], [2435.00, 225.00], [0.77, -13.28], [-6.22, -6.21]]) x = solve(a, b) assert_array_almost_equal(x, np.array([[1., -1, 3, -5], [3, 2, 4, 1]]).T) def test_simple_complex2(self): a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 
0.72-0.92j], [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j], [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j], [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]]) b = np.array([[26.26+51.78j, 31.32-6.70j], [64.30-86.80j, 158.60-14.20j], [-5.75+25.31j, -2.15+30.19j], [1.16+2.57j, -2.56+7.55j]]) x = solve(a, b) assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j], [2-3.j, 5+1.j], [-4-5.j, -3+4.j], [6.j, 2-3.j]])) def test_hermitian(self): # An upper triangular matrix will be used for hermitian matrix a a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j], [0, -4.63, -1.84+0.03j, 2.21+0.21j], [0, 0, -8.87, 1.58-0.90j], [0, 0, 0, -1.36]]) b = np.array([[2.98-10.18j, 28.68-39.89j], [-9.58+3.88j, -24.79-8.40j], [-0.77-16.05j, 4.23-70.02j], [7.79+5.48j, -35.39+18.01j]]) res = np.array([[2.+1j, -8+6j], [3.-2j, 7-2j], [-1+2j, -1+5j], [1.-1j, 3-4j]]) x = solve(a, b, assume_a='her') assert_array_almost_equal(x, res) # Also conjugate a and test for lower triangular data x = solve(a.conj().T, b, assume_a='her', lower=True) assert_array_almost_equal(x, res) def test_pos_and_sym(self): A = np.arange(1, 10).reshape(3, 3) x = solve(np.tril(A)/9, np.ones(3), assume_a='pos') assert_array_almost_equal(x, [9., 1.8, 1.]) x = solve(np.tril(A)/9, np.ones(3), assume_a='sym') assert_array_almost_equal(x, [9., 1.8, 1.]) def test_singularity(self): a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1], [1, 1, 1, 0, 0, 0, 1, 0, 1], [0, 1, 1, 0, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1]]) b = np.arange(9)[:, None] assert_raises(LinAlgError, solve, a, b) def test_ill_condition_warning(self): a = np.array([[1, 1], [1+1e-16, 1-1e-16]]) b = np.ones(2) with warnings.catch_warnings(): warnings.simplefilter('error') assert_raises(RuntimeWarning, solve, a, b) def test_empty_rhs(self): a = np.eye(2) b = [[], []] x = solve(a, b) assert_(x.size == 0, 
'Returned array is not empty') assert_(x.shape == (2, 0), 'Returned empty array shape is wrong') def test_multiple_rhs(self): a = np.eye(2) b = np.random.rand(2, 3, 4) x = solve(a, b) assert_array_almost_equal(x, b) def test_transposed_keyword(self): A = np.arange(9).reshape(3, 3) + 1 x = solve(np.tril(A)/9, np.ones(3), transposed=1) assert_array_almost_equal(x, [1.2, 0.2, 1]) x = solve(np.tril(A)/9, np.ones(3), transposed=0) assert_array_almost_equal(x, [9, -5.4, -1.2]) def test_nonsquare_a(self): assert_raises(ValueError, solve, [1, 2], 1) def test_size_mismatch_with_1D_b(self): assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3)) assert_raises(ValueError, solve, np.eye(3), np.ones(4)) def test_assume_a_keyword(self): assert_raises(ValueError, solve, 1, 1, assume_a='zxcv') def test_all_type_size_routine_combinations(self): sizes = [10, 100, 1000] assume_as = ['gen', 'sym', 'pos', 'her'] dtypes = [np.float32, np.float64, np.complex64, np.complex128] for size, assume_a, dtype in itertools.product(sizes, assume_as, dtypes): is_complex = dtype in (np.complex64, np.complex128) if assume_a == 'her' and not is_complex: continue err_msg = ("Failed for size: {}, assume_a: {}," "dtype: {}".format(size, assume_a, dtype)) a = np.random.randn(size, size).astype(dtype) b = np.random.randn(size).astype(dtype) if is_complex: a = a + (1j*np.random.randn(size, size)).astype(dtype) if assume_a == 'sym': # Can still be complex but only symmetric a = a + a.T elif assume_a == 'her': # Handle hermitian matrices here instead a = a + a.T.conj() elif assume_a == 'pos': a = a.conj().T.dot(a) + 0.1*np.eye(size) x = solve(a, b, assume_a=assume_a) tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6 assert_allclose(a.dot(x), b, atol=tol * size, rtol=tol * size, err_msg=err_msg) class TestSolveTriangular(TestCase): def test_simple(self): """ solve_triangular on a simple 2x2 matrix. 
""" A = array([[1, 0], [1, 2]]) b = [1, 1] sol = solve_triangular(A, b, lower=True) assert_array_almost_equal(sol, [1, 0]) # check that it works also for non-contiguous matrices sol = solve_triangular(A.T, b, lower=False) assert_array_almost_equal(sol, [.5, .5]) # and that it gives the same result as trans=1 sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [.5, .5]) b = identity(2) sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]]) def test_simple_complex(self): """ solve_triangular on a simple 2x2 complex matrix """ A = array([[1+1j, 0], [1j, 2]]) b = identity(2) sol = solve_triangular(A, b, lower=True, trans=1) assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]]) def test_check_finite(self): """ solve_triangular on a simple 2x2 matrix. """ A = array([[1, 0], [1, 2]]) b = [1, 1] sol = solve_triangular(A, b, lower=True, check_finite=False) assert_array_almost_equal(sol, [1, 0]) class TestInv(TestCase): def setUp(self): np.random.seed(1234) def test_simple(self): a = [[1, 2], [3, 4]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), np.eye(2)) a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), np.eye(3)) def test_random(self): n = 20 for i in range(4): a = random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), identity(n)) def test_simple_complex(self): a = [[1, 2], [3, 4j]] a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) def test_random_complex(self): n = 20 for i in range(4): a = random([n, n])+2j*random([n, n]) for i in range(n): a[i, i] = 20*(.1+a[i, i]) a_inv = inv(a) assert_array_almost_equal(dot(a, a_inv), identity(n)) def test_check_finite(self): a = [[1, 2], [3, 4]] a_inv = inv(a, check_finite=False) assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) class TestDet(TestCase): def setUp(self): np.random.seed(1234) def 
test_simple(self): a = [[1, 2], [3, 4]] a_det = det(a) assert_almost_equal(a_det, -2.0) def test_simple_complex(self): a = [[1, 2], [3, 4j]] a_det = det(a) assert_almost_equal(a_det, -6+4j) def test_random(self): basic_det = linalg.det n = 20 for i in range(4): a = random([n, n]) d1 = det(a) d2 = basic_det(a) assert_almost_equal(d1, d2) def test_random_complex(self): basic_det = linalg.det n = 20 for i in range(4): a = random([n, n]) + 2j*random([n, n]) d1 = det(a) d2 = basic_det(a) assert_allclose(d1, d2, rtol=1e-13) def test_check_finite(self): a = [[1, 2], [3, 4]] a_det = det(a, check_finite=False) assert_almost_equal(a_det, -2.0) def direct_lstsq(a, b, cmplx=0): at = transpose(a) if cmplx: at = conjugate(at) a1 = dot(at, a) b1 = dot(at, b) return solve(a1, b1) class TestLstsq(TestCase): lapack_drivers = ('gelsd', 'gelss', 'gelsy', None) def setUp(self): np.random.seed(1234) def test_simple_exact(self): for dtype in REAL_DTYPES: a = np.array([[1, 20], [-30, 4]], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): for bt in (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))): # Store values in case they are overwritten # later a1 = a.copy() b = np.array(bt, dtype=dtype) b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose( dot(a, x), b, atol=25 * _eps_cast(a1.dtype), rtol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_simple_overdet(self): for dtype in REAL_DTYPES: a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype) b = np.array([1, 2, 3], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in 
case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] if lapack_driver == 'gelsy': residuals = np.sum((b - a.dot(x))**2) else: residuals = out[1] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), residuals, rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) assert_allclose(x, (-0.428571428571429, 0.85714285714285), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_simple_overdet_complex(self): for dtype in COMPLEX_DTYPES: a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype) b = np.array([1, 2+4j, 3], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] if lapack_driver == 'gelsy': res = b - a.dot(x) residuals = np.sum(res * res.conj()) else: residuals = out[1] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), residuals, rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) assert_allclose( x, (-0.4831460674157303 + 0.258426966292135j, 0.921348314606741 + 0.292134831460674j), rtol=25 * _eps_cast(a1.dtype), atol=25 * 
_eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_simple_underdet(self): for dtype in REAL_DTYPES: a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) b = np.array([1, 2], dtype=dtype) for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, got %s' % r) assert_allclose(x, (-0.055555555555555, 0.111111111111111, 0.277777777777777), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_exact(self): for dtype in REAL_DTYPES: for n in (20, 200): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, n]), dtype=dtype) for i in range(n): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(4): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == n, 'expected efficient rank %s, ' 'got %s' % (n, r)) if dtype is np.float32: assert_allclose( dot(a, x), b, rtol=500 * _eps_cast(a1.dtype), atol=500 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) else: assert_allclose( dot(a, x), b, rtol=1000 * _eps_cast(a1.dtype), atol=1000 * _eps_cast(a1.dtype), err_msg="driver: %s" % 
lapack_driver) def test_random_complex_exact(self): for dtype in COMPLEX_DTYPES: for n in (20, 200): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, n]) + 1j*random([n, n]), dtype=dtype) for i in range(n): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(2): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == n, 'expected efficient rank %s, ' 'got %s' % (n, r)) if dtype is np.complex64: assert_allclose( dot(a, x), b, rtol=400 * _eps_cast(a1.dtype), atol=400 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) else: assert_allclose( dot(a, x), b, rtol=1000 * _eps_cast(a1.dtype), atol=1000 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_overdet(self): for dtype in REAL_DTYPES: for (n, m) in ((20, 15), (200, 2)): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, m]), dtype=dtype) for i in range(m): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(4): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, skip to the next iteration continue x = out[0] r = out[2] assert_(r == m, 'expected efficient rank %s, ' 'got %s' % (m, r)) assert_allclose( x, direct_lstsq(a, b, cmplx=0), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_random_complex_overdet(self): for dtype in COMPLEX_DTYPES: for (n, m) in ((20, 15), (200, 2)): for lapack_driver in 
TestLstsq.lapack_drivers: for overwrite in (True, False): a = np.asarray(random([n, m]) + 1j*random([n, m]), dtype=dtype) for i in range(m): a[i, i] = 20 * (0.1 + a[i, i]) for i in range(2): b = np.asarray(random([n, 3]), dtype=dtype) # Store values in case they are overwritten # later a1 = a.copy() b1 = b.copy() out = lstsq(a1, b1, lapack_driver=lapack_driver, overwrite_a=overwrite, overwrite_b=overwrite) x = out[0] r = out[2] assert_(r == m, 'expected efficient rank %s, ' 'got %s' % (m, r)) assert_allclose( x, direct_lstsq(a, b, cmplx=1), rtol=25 * _eps_cast(a1.dtype), atol=25 * _eps_cast(a1.dtype), err_msg="driver: %s" % lapack_driver) def test_check_finite(self): for dtype in REAL_DTYPES: a = np.array(((1, 20), (-30, 4)), dtype=dtype) for bt in (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))): for lapack_driver in TestLstsq.lapack_drivers: for overwrite in (True, False): for check_finite in (True, False): b = np.array(bt, dtype=dtype) # Store values in case they are overwritten # later a1 = a.copy() b1 = b.copy() try: out = lstsq(a1, b1, lapack_driver=lapack_driver, check_finite=check_finite, overwrite_a=overwrite, overwrite_b=overwrite) except LstsqLapackError: if lapack_driver is None: mesg = ('LstsqLapackError raised with ' 'lapack_driver being None.') raise AssertionError(mesg) else: # can't proceed, # skip to the next iteration continue x = out[0] r = out[2] assert_(r == 2, 'expected efficient rank 2, ' 'got %s' % r) assert_allclose( dot(a, x), b, rtol=25 * _eps_cast(a.dtype), atol=25 * _eps_cast(a.dtype), err_msg="driver: %s" % lapack_driver) class TestPinv(TestCase): def test_simple_real(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a_pinv = pinv(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_simple_complex(self): a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float)) a_pinv = 
pinv(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_simple_singular(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_simple_cols(self): a = array([[1, 2, 3], [4, 5, 6]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_simple_rows(self): a = array([[1, 2], [3, 4], [5, 6]], dtype=float) a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) def test_check_finite(self): a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]]) a_pinv = pinv(a, check_finite=False) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) a_pinv = pinv2(a, check_finite=False) assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) def test_native_list_argument(self): a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] a_pinv = pinv(a) a_pinv2 = pinv2(a) assert_array_almost_equal(a_pinv, a_pinv2) class TestPinvSymmetric(TestCase): def test_simple_real(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a = np.dot(a, a.T) a_pinv = pinvh(a) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_nonpositive(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) a = np.dot(a, a.T) u, s, vt = np.linalg.svd(a) s[0] *= -1 a = np.dot(u * s, vt) # a is now symmetric non-positive and singular a_pinv = pinv2(a) a_pinvh = pinvh(a) assert_array_almost_equal(a_pinv, a_pinvh) def test_simple_complex(self): a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float)) a = np.dot(a, a.conj().T) a_pinv = pinvh(a) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) def test_native_list_argument(self): a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) a = np.dot(a, a.T) a_pinv = pinvh(a.tolist()) assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) class 
TestVectorNorms(object): def test_types(self): for dtype in np.typecodes['AllFloat']: x = np.array([1, 2, 3], dtype=dtype) tol = max(1e-15, np.finfo(dtype).eps.real * 20) assert_allclose(norm(x), np.sqrt(14), rtol=tol) assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) for dtype in np.typecodes['Complex']: x = np.array([1j, 2j, 3j], dtype=dtype) tol = max(1e-15, np.finfo(dtype).eps.real * 20) assert_allclose(norm(x), np.sqrt(14), rtol=tol) assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) def test_overflow(self): # unlike numpy's norm, this one is # safer on overflow a = array([1e20], dtype=float32) assert_almost_equal(norm(a), a) def test_stable(self): # more stable than numpy's norm a = array([1e4] + [1]*10000, dtype=float32) try: # snrm in double precision; we obtain the same as for float64 # -- large atol needed due to varying blas implementations assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2) except AssertionError: # snrm implemented in single precision, == np.linalg.norm result msg = ": Result should equal either 0.0 or 0.5 (depending on " \ "implementation of snrm2)." assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg) def test_zero_norm(self): assert_equal(norm([1, 0, 3], 0), 2) assert_equal(norm([1, 2, 3], 0), 3) def test_axis_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2) assert_allclose(norm(a, 1, axis=1), [[5.] * 2] * 2) @dec.skipif(NumpyVersion(np.__version__) < '1.10.0') def test_keepdims_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') b = norm(a, axis=1, keepdims=True) assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2) assert_(b.shape == (2, 1, 2)) assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2) class TestMatrixNorms(object): def test_matrix_norms(self): # Not all of these are matrix norms in the most technical sense. 
np.random.seed(1234) for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4): for t in np.single, np.double, np.csingle, np.cdouble, np.int64: A = 10 * np.random.randn(n, m).astype(t) if np.issubdtype(A.dtype, np.complexfloating): A = (A + 10j * np.random.randn(n, m)).astype(t) t_high = np.cdouble else: t_high = np.double for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf): actual = norm(A, ord=order) desired = np.linalg.norm(A, ord=order) # SciPy may return higher precision matrix norms. # This is a consequence of using LAPACK. if not np.allclose(actual, desired): desired = np.linalg.norm(A.astype(t_high), ord=order) assert_allclose(actual, desired) def test_axis_kwd(self): a = np.array([[[2, 1], [3, 4]]] * 2, 'd') b = norm(a, ord=np.inf, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1)) d = norm(a, ord=1, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) b = norm(a, ord=1, axis=(1, 0)) c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1)) d = norm(a, ord=np.inf, axis=(0, 1)) assert_allclose(b, c) assert_allclose(c, d) assert_allclose(b, d) assert_(b.shape == c.shape == d.shape) @dec.skipif(NumpyVersion(np.__version__) < '1.10.0') def test_keepdims_kwd(self): a = np.arange(120, dtype='d').reshape(2, 3, 4, 5) b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True) c = norm(a, ord=1, axis=(0, 1), keepdims=True) assert_allclose(b, c) assert_(b.shape == c.shape) class TestOverwrite(object): def test_solve(self): assert_no_overwrite(solve, [(3, 3), (3,)]) def test_solve_triangular(self): assert_no_overwrite(solve_triangular, [(3, 3), (3,)]) def test_solve_banded(self): assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b), [(4, 6), (6,)]) def test_solveh_banded(self): assert_no_overwrite(solveh_banded, [(2, 6), (6,)]) def test_inv(self): assert_no_overwrite(inv, [(3, 3)]) def test_det(self): assert_no_overwrite(det, [(3, 3)]) def test_lstsq(self): assert_no_overwrite(lstsq, 
[(3, 2), (3,)]) def test_pinv(self): assert_no_overwrite(pinv, [(3, 3)]) def test_pinv2(self): assert_no_overwrite(pinv2, [(3, 3)]) def test_pinvh(self): assert_no_overwrite(pinvh, [(3, 3)]) class TestSolveCirculant(TestCase): def test_basic1(self): c = np.array([1, 2, 3, 5]) b = np.array([1, -1, 1, 0]) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_basic2(self): # b is a 2-d matrix. c = np.array([1, 2, -3, -5]) b = np.arange(12).reshape(4, 3) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_basic3(self): # b is a 3-d matrix. c = np.array([1, 2, -3, -5]) b = np.arange(24).reshape(4, 3, 2) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_complex(self): # Complex b and c c = np.array([1+2j, -3, 4j, 5]) b = np.arange(8).reshape(4, 2) + 0.5j x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_random_b_and_c(self): # Random b and c np.random.seed(54321) c = np.random.randn(50) b = np.random.randn(50) x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) def test_singular(self): # c gives a singular circulant matrix. c = np.array([1, 1, 0, 0]) b = np.array([1, 2, 3, 4]) x = solve_circulant(c, b, singular='lstsq') y, res, rnk, s = lstsq(circulant(c), b) assert_allclose(x, y) assert_raises(LinAlgError, solve_circulant, x, y) def test_axis_args(self): # Test use of caxis, baxis and outaxis. 
# c has shape (2, 1, 4) c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]]) # b has shape (3, 4) b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]]) x = solve_circulant(c, b, baxis=1) assert_equal(x.shape, (4, 2, 3)) expected = np.empty_like(x) expected[:, 0, :] = solve(circulant(c[0]), b.T) expected[:, 1, :] = solve(circulant(c[1]), b.T) assert_allclose(x, expected) x = solve_circulant(c, b, baxis=1, outaxis=-1) assert_equal(x.shape, (2, 3, 4)) assert_allclose(np.rollaxis(x, -1), expected) # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3). x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1) assert_equal(x.shape, (4, 2, 3)) assert_allclose(x, expected) def test_native_list_arguments(self): # Same as test_basic1 using python's native list. c = [1, 2, 3, 5] b = [1, -1, 1, 0] x = solve_circulant(c, b) y = solve(circulant(c), b) assert_allclose(x, y) class TestMatrix_Balance(TestCase): def test_string_arg(self): assert_raises(ValueError, matrix_balance, 'Some string for fail') def test_infnan_arg(self): assert_raises(ValueError, matrix_balance, np.array([[1, 2], [3, np.inf]])) assert_raises(ValueError, matrix_balance, np.array([[1, 2], [3, np.nan]])) def test_scaling(self): _, y = matrix_balance(np.array([[1000, 1], [1000, 0]])) # Pre/post LAPACK 3.5.0 gives the same result up to an offset # since in each case col norm is x1000 greater and # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5. 
assert_allclose(int(np.diff(np.log2(np.diag(y)))), 5) def test_scaling_order(self): A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]]) x, y = matrix_balance(A) assert_allclose(solve(y, A).dot(y), x) def test_separate(self): _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]), separate=1) assert_equal(int(np.diff(np.log2(y))), 5) assert_allclose(z, np.arange(2)) def test_permutation(self): A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))), np.ones((3, 3))) x, (y, z) = matrix_balance(A, separate=1) assert_allclose(y, np.ones_like(y)) assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2])) def test_perm_and_scaling(self): # Matrix with its diagonal removed cases = ( # Case 0 np.array([[0., 0., 0., 0., 0.000002], [0., 0., 0., 0., 0.], [2., 2., 0., 0., 0.], [2., 2., 0., 0., 0.], [0., 0., 0.000002, 0., 0.]]), # Case 1 user reported GH-7258 np.array([[-0.5, 0., 0., 0.], [0., -1., 0., 0.], [1., 0., -0.5, 0.], [0., 1., 0., -1.]]), # Case 2 user reported GH-7258 np.array([[-3., 0., 1., 0.], [-1., -1., -0., 1.], [-3., -0., -0., 0.], [-1., -0., 1., -1.]]) ) for A in cases: x, y = matrix_balance(A) x, (s, p) = matrix_balance(A, separate=1) ip = np.empty_like(p) ip[p] = np.arange(A.shape[0]) assert_allclose(y, np.diag(s)[ip, :]) assert_allclose(solve(y, A).dot(y), x) if __name__ == "__main__": run_module_suite()
bsd-3-clause
chiefspace/udemy-rest-api
udemy_rest_flask1/env/lib/python3.4/site-packages/setuptools/command/install_scripts.py
14
2067
from distutils.command.install_scripts import install_scripts \
     as _install_scripts
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log


class install_scripts(_install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts.

    Extends distutils' ``install_scripts`` so that, after the normal
    script installation, wrapper scripts for the project's entry points
    are generated from the egg metadata and written alongside them.
    """

    def initialize_options(self):
        _install_scripts.initialize_options(self)
        # When True, entry-point wrapper scripts are NOT generated
        # (e.g. when installing into an .egg file).
        self.no_ep = False

    def run(self):
        """Install plain scripts, then write entry-point wrapper scripts."""
        # Imported lazily: easy_install is a heavyweight module and also
        # imports this one, so a top-level import would be circular.
        from setuptools.command.easy_install import get_script_args
        from setuptools.command.easy_install import sys_executable

        self.run_command("egg_info")
        if self.distribution.scripts:
            _install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return

        # Build a Distribution describing the just-written egg metadata so
        # get_script_args() can read the entry points from it.
        ei_cmd = self.get_finalized_command("egg_info")
        dist = Distribution(
            ei_cmd.egg_base,
            PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        executable = getattr(bs_cmd, 'executable', sys_executable)
        is_wininst = getattr(
            self.get_finalized_command("bdist_wininst"), '_is_running', False
        )
        for args in get_script_args(dist, executable, is_wininst):
            self.write_script(*args)

    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory.

        ``mode`` is appended to ``"w"`` when opening the file, so "t"
        (text, the default) or "b" (binary) are the expected values.
        Extra positional arguments are accepted and ignored so callers
        may pass through unused metadata.
        """
        from setuptools.command.easy_install import chmod, current_umask

        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)

        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            # Fix: use a context manager so the file handle is closed even
            # if write() raises (the original left it open on error).
            with open(target, "w" + mode) as f:
                f.write(contents)
            chmod(target, 0x1FF - mask)  # 0777 minus the current umask
gpl-2.0
linostar/timeline-clone
dependencies/timelinelib/pysvg-0.2.1/pysvg/text.py
7
6821
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
'''
(C) 2008, 2009 Kerim Mansour
For licensing information please refer to license.txt
'''
from attributes import *
from core import BaseElement, PointAttrib, DeltaPointAttrib, RotateAttrib


class altGlyphDef(BaseElement, CoreAttrib):
    """
    Class representing the altGlyphDef element of an svg doc.
    """
    def __init__(self, **kwargs):
        # NOTE(review): the tag name 'altGlypfDef' looks like a typo for
        # 'altGlyphDef', but it is the element name this library has always
        # emitted -- left unchanged to preserve output compatibility.
        BaseElement.__init__(self, 'altGlypfDef')
        self.setKWARGS(**kwargs)


class altGlyphItem(BaseElement, CoreAttrib):
    """
    Class representing the altGlyphItem element of an svg doc.
    """
    def __init__(self, **kwargs):
        # NOTE(review): 'altGlypfItem' appears to be a typo for
        # 'altGlyphItem'; kept as-is for output compatibility.
        BaseElement.__init__(self, 'altGlypfItem')
        self.setKWARGS(**kwargs)


class glyphRef(BaseElement, CoreAttrib, ExternalAttrib, StyleAttrib, FontAttrib, XLinkAttrib, PaintAttrib, PointAttrib, DeltaPointAttrib):
    """
    Class representing the glyphRef element of an svg doc.
    """
    def __init__(self, **kwargs):
        BaseElement.__init__(self, 'glyphRef')
        self.setKWARGS(**kwargs)

    def set_glyphRef(self, glyphRef):
        self._attributes['glyphRef'] = glyphRef

    def get_glyphRef(self):
        return self._attributes.get('glyphRef')

    def set_format(self, format):
        self._attributes['format'] = format

    def get_format(self):
        return self._attributes.get('format')

    def set_lengthAdjust(self, lengthAdjust):
        self._attributes['lengthAdjust'] = lengthAdjust

    def get_lengthAdjust(self):
        return self._attributes.get('lengthAdjust')


class altGlyph(glyphRef, ConditionalAttrib, GraphicalEventsAttrib, OpacityAttrib, GraphicsAttrib, CursorAttrib, FilterAttrib, MaskAttrib, ClipAttrib, TextContentAttrib, RotateAttrib):
    """
    Class representing the altGlyph element of an svg doc.
    """
    def __init__(self, **kwargs):
        BaseElement.__init__(self, 'altGlyph')
        self.setKWARGS(**kwargs)

    def set_textLength(self, textLength):
        self._attributes['textLength'] = textLength

    def get_textLength(self):
        return self._attributes.get('textLength')


class textPath(BaseElement, CoreAttrib, ConditionalAttrib, ExternalAttrib, StyleAttrib, XLinkAttrib, FontAttrib, PaintAttrib, GraphicalEventsAttrib, OpacityAttrib, GraphicsAttrib, CursorAttrib, FilterAttrib, MaskAttrib, ClipAttrib, TextContentAttrib):
    """
    Class representing the textPath element of an svg doc.
    """
    def __init__(self, **kwargs):
        BaseElement.__init__(self, 'textPath')
        self.setKWARGS(**kwargs)

    def set_startOffset(self, startOffset):
        self._attributes['startOffset'] = startOffset

    def get_startOffset(self):
        return self._attributes.get('startOffset')

    def set_textLength(self, textLength):
        self._attributes['textLength'] = textLength

    def get_textLength(self):
        return self._attributes.get('textLength')

    def set_lengthAdjust(self, lengthAdjust):
        self._attributes['lengthAdjust'] = lengthAdjust

    def get_lengthAdjust(self):
        return self._attributes.get('lengthAdjust')

    def set_method(self, method):
        self._attributes['method'] = method

    def get_method(self):
        return self._attributes.get('method')

    def set_spacing(self, spacing):
        self._attributes['spacing'] = spacing

    def get_spacing(self):
        return self._attributes.get('spacing')


class tref(BaseElement, CoreAttrib, ConditionalAttrib, ExternalAttrib, StyleAttrib, XLinkAttrib, PointAttrib, DeltaPointAttrib, RotateAttrib, GraphicalEventsAttrib, PaintAttrib, FontAttrib, OpacityAttrib, GraphicsAttrib, CursorAttrib, FilterAttrib, MaskAttrib, ClipAttrib, TextContentAttrib):
    """
    Class representing the tref element of an svg doc.
    """
    def __init__(self, **kwargs):
        BaseElement.__init__(self, 'tref')
        self.setKWARGS(**kwargs)

    def set_textLength(self, textLength):
        self._attributes['textLength'] = textLength

    def get_textLength(self):
        return self._attributes.get('textLength')

    def set_lengthAdjust(self, lengthAdjust):
        self._attributes['lengthAdjust'] = lengthAdjust

    def get_lengthAdjust(self):
        return self._attributes.get('lengthAdjust')


class tspan(BaseElement, CoreAttrib, ConditionalAttrib, ExternalAttrib, StyleAttrib, PointAttrib, DeltaPointAttrib, RotateAttrib, GraphicalEventsAttrib, PaintAttrib, FontAttrib, OpacityAttrib, GraphicsAttrib, CursorAttrib, FilterAttrib, MaskAttrib, ClipAttrib, TextContentAttrib):
    """
    Class representing the tspan element of an svg doc.
    """
    def __init__(self, x=None, y=None, dx=None, dy=None, rotate=None, textLength=None, lengthAdjust=None, **kwargs):
        BaseElement.__init__(self, 'tspan')
        self.set_x(x)
        self.set_y(y)
        self.set_dx(dx)
        self.set_dy(dy)
        self.set_rotate(rotate)
        self.set_textLength(textLength)
        self.set_lengthAdjust(lengthAdjust)
        self.setKWARGS(**kwargs)

    def set_textLength(self, textLength):
        self._attributes['textLength'] = textLength

    def get_textLength(self):
        return self._attributes.get('textLength')

    def set_lengthAdjust(self, lengthAdjust):
        self._attributes['lengthAdjust'] = lengthAdjust

    def get_lengthAdjust(self):
        return self._attributes.get('lengthAdjust')


class text(BaseElement, CoreAttrib, ConditionalAttrib, ExternalAttrib, StyleAttrib, PointAttrib, DeltaPointAttrib, RotateAttrib, GraphicalEventsAttrib, PaintAttrib, FontAttrib, OpacityAttrib, GraphicsAttrib, CursorAttrib, FilterAttrib, MaskAttrib, ClipAttrib, TextContentAttrib, TextAttrib):
    """
    Class representing the text element of an svg doc.
    """
    def __init__(self, content=None, x=None, y=None, dx=None, dy=None, rotate=None, textLength=None, lengthAdjust=None, **kwargs):
        BaseElement.__init__(self, 'text')
        # Fix: the original used the Python-2-only '<>' operator
        # ("content <> None"), which is a SyntaxError on Python 3.
        # 'is not None' is correct on both Python 2 and 3.
        if content is not None:
            self.appendTextContent(content)
        self.set_x(x)
        self.set_y(y)
        self.set_dx(dx)
        self.set_dy(dy)
        self.set_rotate(rotate)
        self.set_textLength(textLength)
        self.set_lengthAdjust(lengthAdjust)
        self.setKWARGS(**kwargs)

    def set_transform(self, transform):
        self._attributes['transform'] = transform

    def get_transform(self):
        return self._attributes.get('transform')

    def set_textLength(self, textLength):
        self._attributes['textLength'] = textLength

    def get_textLength(self):
        return self._attributes.get('textLength')

    def set_lengthAdjust(self, lengthAdjust):
        self._attributes['lengthAdjust'] = lengthAdjust

    def get_lengthAdjust(self):
        return self._attributes.get('lengthAdjust')
gpl-3.0
freeslugs/eventum
app/lib/text.py
1
2732
import re


def truncate_html(text, truncate_len, truncate_text):
    """
    Truncates HTML to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the
    given HTML. If text is truncated, truncate_text will be appended to
    the result. Newlines in the HTML are preserved.

    Modified from django.utils.text
    https://github.com/django/django/blob/master/django/utils/text.py

    :param str text: The text to truncate.
    :param int truncate_len: The number of words to shorten the HTML to.
    :param str truncate_text: Text like '...' to append to the end of
        truncated text.

    :returns: The truncated HTML
    :rtype: str
    """
    # A "word" is a run of word characters or HTML entities (group 1);
    # anything in <...> matches the alternative and is never counted.
    re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
    re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)

    length = truncate_len
    if length <= 0:
        return ''

    # HTML4 void elements never take a closing tag.
    html4_singlets = (
        'br', 'col', 'link', 'base', 'img',
        'param', 'area', 'hr', 'input'
    )

    # Count non-HTML chars/words and keep note of open tags
    pos = 0
    end_text_pos = 0
    current_len = 0
    open_tags = []
    while current_len <= length:
        m = re_words.search(text, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word or char
            current_len += 1
            if current_len == truncate_len:
                # Remember where the allowed text ends.
                end_text_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or current_len >= truncate_len:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        # Element names are always case-insensitive
        tagname = tagname.lower()
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag,
                # all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i + 1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)

    if current_len <= length:
        # Nothing was cut off -- return the input unchanged.
        return text
    out = text[:end_text_pos]
    if truncate_text:
        out += truncate_text
    # Close any tags still open
    for tag in open_tags:
        out += '</{}>'.format(tag)
    # Return string
    return out
mit
mttr/django
tests/bash_completion/tests.py
327
3888
""" A series of tests to establish that the command-line bash completion works. """ import os import sys import unittest from django.apps import apps from django.core.management import ManagementUtility from django.test.utils import captured_stdout class BashCompletionTests(unittest.TestCase): """ Testing the Python level bash completion code. This requires setting up the environment as if we got passed data from bash. """ def setUp(self): self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE') os.environ['DJANGO_AUTO_COMPLETE'] = '1' def tearDown(self): if self.old_DJANGO_AUTO_COMPLETE: os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE else: del os.environ['DJANGO_AUTO_COMPLETE'] def _user_input(self, input_str): """ Set the environment and the list of command line arguments. This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is an array consisting of the individual words in the current command line, the latter is the index of the current cursor position, so in case a word is completed and the cursor is placed after a whitespace, $COMP_CWORD must be incremented by 1: * 'django-admin start' -> COMP_CWORD=1 * 'django-admin startproject' -> COMP_CWORD=1 * 'django-admin startproject ' -> COMP_CWORD=2 """ os.environ['COMP_WORDS'] = input_str idx = len(input_str.split(' ')) - 1 # Index of the last word comp_cword = idx + 1 if input_str.endswith(' ') else idx os.environ['COMP_CWORD'] = str(comp_cword) sys.argv = input_str.split() def _run_autocomplete(self): util = ManagementUtility(argv=sys.argv) with captured_stdout() as stdout: try: util.autocomplete() except SystemExit: pass return stdout.getvalue().strip().split('\n') def test_django_admin_py(self): "django_admin.py will autocomplete option flags" self._user_input('django-admin sqlmigrate --verb') output = self._run_autocomplete() self.assertEqual(output, ['--verbosity=']) def test_manage_py(self): "manage.py will autocomplete option flags" 
self._user_input('manage.py sqlmigrate --verb') output = self._run_autocomplete() self.assertEqual(output, ['--verbosity=']) def test_custom_command(self): "A custom command can autocomplete option flags" self._user_input('django-admin test_command --l') output = self._run_autocomplete() self.assertEqual(output, ['--list']) def test_subcommands(self): "Subcommands can be autocompleted" self._user_input('django-admin sql') output = self._run_autocomplete() self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset']) def test_completed_subcommand(self): "Show option flags in case a subcommand is completed" self._user_input('django-admin startproject ') # Trailing whitespace output = self._run_autocomplete() for item in output: self.assertTrue(item.startswith('--')) def test_help(self): "No errors, just an empty list if there are no autocomplete options" self._user_input('django-admin help --') output = self._run_autocomplete() self.assertEqual(output, ['']) def test_app_completion(self): "Application names will be autocompleted for an AppCommand" self._user_input('django-admin sqlmigrate a') output = self._run_autocomplete() a_labels = sorted(app_config.label for app_config in apps.get_app_configs() if app_config.label.startswith('a')) self.assertEqual(output, a_labels)
bsd-3-clause
arunkgupta/gramps
gramps/plugins/gramplet/ageondategramplet.py
1
3792
# Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2007-2009 Douglas S. Blank <doug.blank@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # $Id$ """ AgeOnDateGramplet computes the age for everyone thought to be alive on a particular date. """ #------------------------------------------------------------------------ # # Python modules # #------------------------------------------------------------------------ #------------------------------------------------------------------------ # # GRAMPS modules # #------------------------------------------------------------------------ from gramps.gen.plug import Gramplet from gramps.gen.ggettext import sgettext as _ from gramps.gen.datehandler import parser from gramps.gui.plug.quick import run_quick_report_by_name #------------------------------------------------------------------------ # # AgeOnDateGramplet class # #------------------------------------------------------------------------ class AgeOnDateGramplet(Gramplet): """ Gramplet that computes ages on a particular date for everyone thought to be alive. """ def init(self): """ Constructs the GUI, consisting of a message, an entry, and a Run button. 
""" from gi.repository import Gtk # GUI setup: self.set_tooltip(_("Enter a date, click Run")) vbox = Gtk.VBox() hbox = Gtk.HBox() # label, entry description = Gtk.TextView() description.set_wrap_mode(Gtk.WrapMode.WORD) description.set_editable(False) buffer = description.get_buffer() buffer.set_text(_("Enter a valid date (like YYYY-MM-DD) in the" " entry below and click Run. This will compute" " the ages for everyone in your Family Tree on" " that date. You can then sort by the age column," " and double-click the row to view or edit.")) label = Gtk.Label() label.set_text(_("Date") + ":") self.entry = Gtk.Entry() button = Gtk.Button(_("Run")) button.connect("clicked", self.run) ##self.filter = hbox.pack_start(label, False, True, 0) hbox.pack_start(self.entry, True, True, 0) vbox.pack_start(description, True, True, 0) vbox.pack_start(hbox, False, True, 0) vbox.pack_start(button, False, True, 0) self.gui.get_container_widget().remove(self.gui.textview) self.gui.get_container_widget().add_with_viewport(vbox) vbox.show_all() def post_init(self): self.disconnect("active-changed") def run(self, obj): """ Method that is run when you click the Run button. The date is retrieved from the entry box, parsed as a date, and then handed to the quick report. """ text = self.entry.get_text() date = parser.parse(text) run_quick_report_by_name(self.gui.dbstate, self.gui.uistate, 'ageondate', date)
gpl-2.0
nvoron23/arangodb
3rdParty/V8-4.3.61/tools/testrunner/server/__init__.py
651
1571
# Copyright 2012 the V8 project authors. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
apache-2.0
kristinriebe/uws-validator
features/steps/steps_http.py
1
5403
from utils import append_path, add_multi_path_segments, get_UwsName, get_XlinkName, get_dict_from_paramtable, get_joblink from lxml import etree as et import requests import uws from ensure import ensure, check from purl import URL # for parsing dates: from datetime import datetime import dateutil.parser import pytz # for job parameters in json format import json ## basic http support, mainly copied from behave-http, ## https://github.com/mikek/behave-http/, BSD 2-Clause License @given('I am using server "{server}"') def using_server(context, server): context.server = URL(server) @given('I set base URL to "{base_url}"') def set_base_url(context, base_url): context.server = add_multi_path_segments(context.server, base_url) @given('I set "{var}" header to "{value}"') def set_header(context, var, value): # We must keep the headers as implicit ascii to avoid encoding failure when # the entire HTTP body is constructed by concatenating strings. context.headers[var.encode('ascii')] = value.encode('ascii') @given('I set BasicAuth username to "{username}" and password to "{password}"') def set_basic_auth_headers(context, username, password): context.auth = (username, password) @when('I make a GET request to "{url_path_segment}"') def step_impl(context, url_path_segment): url = append_path(context.server, url_path_segment) print(" GET request to: ", url) if context.auth != (u'', u''): print(" with authentication details: ", context.auth) # if not url_path_segment.startswith('?'): # raise NotImplementedError("url: %r" % url) context.response = requests.get( url, headers=context.headers, auth=context.auth ) @when('I make a DELETE request to "{url_path_segment}"') def delete_request(context, url_path_segment): url = append_path(context.server, url_path_segment) context.response = requests.delete( url, headers=context.headers, auth=context.auth ) @then('the response status should be one of "{statuses}"') def response_status_in(context, statuses): 
ensure(context.response.status_code).is_in( [int(s) for s in statuses.split(',')] ) @then('the response status should be "{status}"') def response_status(context, status): ensure(context.response.status_code).equals(int(status)) @then('the response body should contain "{content}"') def response_body_contains(context, content): ensure(content).is_in(context.response.content.decode('utf-8')) @then('the "{var}" header should be "{value}"') def check_header_inline(context, var, value): ensure(context.response.headers[var].encode('ascii')).equals( value.encode('ascii')) ## end of part from behave-http @given('I set BasicAuth username and password to user-defined values') def set_basic_auth_headers(context): context.auth = (context.username, context.password) @given('I set base URL to user-defined value') def set_base_url(context): #context.server = context.server.add_path_segment(context.base_url) # This causes trouble on at least one setup (Windows 8, 64bit, Anaconda Python installation; '/' in path gets url-encoded!) 
# Thus try to avoid this: context.server = add_multi_path_segments(context.server, context.base_url) @when('I make a GET request to base URL') def step_impl(context): url = context.server print(" GET request to URL: ", url) if context.auth != (u'', u''): print(" with authentication details: ", context.auth) context.response = requests.get( url, headers=context.headers, auth=context.auth ) @when('I make a GET request to URL "{url}"') def step_impl(context, url): print(" GET request to URL: ", url) if context.auth != (u'', u''): print(" with authentication details: ", context.auth) context.response = requests.get( url, headers=context.headers, auth=context.auth ) @when('I make a POST request to "{url_path_segment}" with') def step_impl(context, url_path_segment): # convert given table-data to dictionary datadict = get_dict_from_paramtable(context.table) url = append_path(context.server, url_path_segment) print(" POST request to URL: ", url) if context.auth != (u'', u''): print(" with authentication details: ", context.auth) context.response = requests.post( url, data=datadict, headers=context.headers, auth=context.auth ) @when('I make a POST request to base URL with') def step_impl(context): # convert given table-data to dictionary datadict = get_dict_from_paramtable(context.table) url = context.server print(" POST request to URL: ", url) print(" with authentication details: ", context.auth) context.response = requests.post( url, data=datadict, headers=context.headers, auth=context.auth ) #print("response: ", context.response, context.response.text) @then('the "{var}" header should contain "{value}"') def check_header_inline(context, var, value): ensure(value.encode('ascii')).is_in(context.response.headers[var].encode('ascii')) @then('the response status should not be "{status}"') def response_not_status(context, status): ensure(context.response.status_code).is_not(int(status))
apache-2.0
ibobalo/python-for-android
test_builds/tests/test_apk.py
4
2423
from pythonforandroid.toolchain import main from pythonforandroid.recipe import Recipe from os import path import sys import pytest # Set these values manually before testing (for now) ndk_dir = '/home/asandy/android/crystax-ndk-10.3.2' ndk_version='crystax-ndk-10.3.2' cur_dir = path.dirname(path.abspath(__file__)) testapps_dir = path.join(path.split(path.split(cur_dir)[0])[0], 'testapps') orig_argv = sys.argv[:] def set_argv(argv): while sys.argv: sys.argv.pop() sys.argv.append(orig_argv[0]) for item in argv: sys.argv.append(item) for item in orig_argv[1:]: if item == '-s': continue sys.argv.append(item) argument_combinations = [{'app_dir': path.join(testapps_dir, 'testapp'), 'requirements': 'python2,pyjnius,kivy', 'packagename': 'p4a_test_sdl2', 'bootstrap': 'sdl2', 'ndk_dir': ndk_dir, 'ndk_version': ndk_version}, {'app_dir': path.join(testapps_dir, 'testapp'), 'requirements': 'python2,pyjnius,kivy', 'packagename': 'p4a_test_pygame', 'bootstrap': 'pygame', 'ndk_dir': ndk_dir, 'ndk_version': ndk_version}, {'app_dir': path.join(testapps_dir, 'testapp_flask'), 'requirements': 'python2,flask,pyjnius', 'packagename': 'p4a_test_flask', 'bootstrap': 'webview', 'ndk_dir': ndk_dir, 'ndk_version': ndk_version}, ] @pytest.mark.parametrize('args', argument_combinations) def test_build_sdl2(args): Recipe.recipes = {} set_argv(('apk --requirements={requirements} --private ' '{app_dir} --package=net.p4a.{packagename} --name={packagename} ' '--version=0.1 --bootstrap={bootstrap} --android_api=19 ' '--ndk_dir={ndk_dir} --ndk_version={ndk_version} --debug ' '--permission VIBRATE ' '--symlink-java-src ' '--orientation portrait --dist_name=test-{packagename}').format( **args).split(' ')) print('argv are', sys.argv) main()
mit
nan86150/ImageFusion
lib/python2.7/site-packages/setuptools/command/install.py
496
4685
from distutils.errors import DistutilsArgError import inspect import glob import warnings import platform import distutils.command.install as orig import setuptools # Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for # now. See https://bitbucket.org/pypa/setuptools/issue/199/ _install = orig.install class install(orig.install): """Use easy_install to install the package, w/dependencies""" user_options = orig.install.user_options + [ ('old-and-unmanageable', None, "Try not to use this!"), ('single-version-externally-managed', None, "used by system package builders to create 'flat' eggs"), ] boolean_options = orig.install.boolean_options + [ 'old-and-unmanageable', 'single-version-externally-managed', ] new_commands = [ ('install_egg_info', lambda self: True), ('install_scripts', lambda self: True), ] _nc = dict(new_commands) def initialize_options(self): orig.install.initialize_options(self) self.old_and_unmanageable = None self.single_version_externally_managed = None def finalize_options(self): orig.install.finalize_options(self) if self.root: self.single_version_externally_managed = True elif self.single_version_externally_managed: if not self.root and not self.record: raise DistutilsArgError( "You must specify --record or --root when building system" " packages" ) def handle_extra_path(self): if self.root or self.single_version_externally_managed: # explicit backward-compatibility mode, allow extra_path to work return orig.install.handle_extra_path(self) # Ignore extra_path when installing an egg (or being run by another # command without --root or --single-version-externally-managed self.path_file = None self.extra_dirs = '' def run(self): # Explicit request for old-style install? Just do it if self.old_and_unmanageable or self.single_version_externally_managed: return orig.install.run(self) if not self._called_from_setup(inspect.currentframe()): # Run in backward-compatibility mode to support bdist_* commands. 
orig.install.run(self) else: self.do_egg_install() @staticmethod def _called_from_setup(run_frame): """ Attempt to detect whether run() was called from setup() or by another command. If called by setup(), the parent caller will be the 'run_command' method in 'distutils.dist', and *its* caller will be the 'run_commands' method. If called any other way, the immediate caller *might* be 'run_command', but it won't have been called by 'run_commands'. Return True in that case or if a call stack is unavailable. Return False otherwise. """ if run_frame is None: msg = "Call stack not available. bdist_* commands may fail." warnings.warn(msg) if platform.python_implementation() == 'IronPython': msg = "For best results, pass -X:Frames to enable call stack." warnings.warn(msg) return True res = inspect.getouterframes(run_frame)[2] caller, = res[:1] info = inspect.getframeinfo(caller) caller_module = caller.f_globals.get('__name__', '') return ( caller_module == 'distutils.dist' and info.function == 'run_commands' ) def do_egg_install(self): easy_install = self.distribution.get_command_class('easy_install') cmd = easy_install( self.distribution, args="x", root=self.root, record=self.record, ) cmd.ensure_finalized() # finalize before bdist_egg munges install cmd cmd.always_copy_from = '.' # make sure local-dir eggs get installed # pick up setup-dir .egg files only: no .egg-info cmd.package_index.scan(glob.glob('*.egg')) self.run_command('bdist_egg') args = [self.distribution.get_command_obj('bdist_egg').egg_output] if setuptools.bootstrap_install_from: # Bootstrap self-installation of setuptools args.insert(0, setuptools.bootstrap_install_from) cmd.args = args cmd.run() setuptools.bootstrap_install_from = None # XXX Python 3.1 doesn't see _nc if this is inside the class install.sub_commands = ( [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + install.new_commands )
mit
gauribhoite/personfinder
app/unidecode/x06f.py
252
4650
data = ( 'Qing ', # 0x00 'Yu ', # 0x01 'Piao ', # 0x02 'Ji ', # 0x03 'Ya ', # 0x04 'Jiao ', # 0x05 'Qi ', # 0x06 'Xi ', # 0x07 'Ji ', # 0x08 'Lu ', # 0x09 'Lu ', # 0x0a 'Long ', # 0x0b 'Jin ', # 0x0c 'Guo ', # 0x0d 'Cong ', # 0x0e 'Lou ', # 0x0f 'Zhi ', # 0x10 'Gai ', # 0x11 'Qiang ', # 0x12 'Li ', # 0x13 'Yan ', # 0x14 'Cao ', # 0x15 'Jiao ', # 0x16 'Cong ', # 0x17 'Qun ', # 0x18 'Tuan ', # 0x19 'Ou ', # 0x1a 'Teng ', # 0x1b 'Ye ', # 0x1c 'Xi ', # 0x1d 'Mi ', # 0x1e 'Tang ', # 0x1f 'Mo ', # 0x20 'Shang ', # 0x21 'Han ', # 0x22 'Lian ', # 0x23 'Lan ', # 0x24 'Wa ', # 0x25 'Li ', # 0x26 'Qian ', # 0x27 'Feng ', # 0x28 'Xuan ', # 0x29 'Yi ', # 0x2a 'Man ', # 0x2b 'Zi ', # 0x2c 'Mang ', # 0x2d 'Kang ', # 0x2e 'Lei ', # 0x2f 'Peng ', # 0x30 'Shu ', # 0x31 'Zhang ', # 0x32 'Zhang ', # 0x33 'Chong ', # 0x34 'Xu ', # 0x35 'Huan ', # 0x36 'Kuo ', # 0x37 'Jian ', # 0x38 'Yan ', # 0x39 'Chuang ', # 0x3a 'Liao ', # 0x3b 'Cui ', # 0x3c 'Ti ', # 0x3d 'Yang ', # 0x3e 'Jiang ', # 0x3f 'Cong ', # 0x40 'Ying ', # 0x41 'Hong ', # 0x42 'Xun ', # 0x43 'Shu ', # 0x44 'Guan ', # 0x45 'Ying ', # 0x46 'Xiao ', # 0x47 '[?] ', # 0x48 '[?] 
', # 0x49 'Xu ', # 0x4a 'Lian ', # 0x4b 'Zhi ', # 0x4c 'Wei ', # 0x4d 'Pi ', # 0x4e 'Jue ', # 0x4f 'Jiao ', # 0x50 'Po ', # 0x51 'Dang ', # 0x52 'Hui ', # 0x53 'Jie ', # 0x54 'Wu ', # 0x55 'Pa ', # 0x56 'Ji ', # 0x57 'Pan ', # 0x58 'Gui ', # 0x59 'Xiao ', # 0x5a 'Qian ', # 0x5b 'Qian ', # 0x5c 'Xi ', # 0x5d 'Lu ', # 0x5e 'Xi ', # 0x5f 'Xuan ', # 0x60 'Dun ', # 0x61 'Huang ', # 0x62 'Min ', # 0x63 'Run ', # 0x64 'Su ', # 0x65 'Liao ', # 0x66 'Zhen ', # 0x67 'Zhong ', # 0x68 'Yi ', # 0x69 'Di ', # 0x6a 'Wan ', # 0x6b 'Dan ', # 0x6c 'Tan ', # 0x6d 'Chao ', # 0x6e 'Xun ', # 0x6f 'Kui ', # 0x70 'Yie ', # 0x71 'Shao ', # 0x72 'Tu ', # 0x73 'Zhu ', # 0x74 'San ', # 0x75 'Hei ', # 0x76 'Bi ', # 0x77 'Shan ', # 0x78 'Chan ', # 0x79 'Chan ', # 0x7a 'Shu ', # 0x7b 'Tong ', # 0x7c 'Pu ', # 0x7d 'Lin ', # 0x7e 'Wei ', # 0x7f 'Se ', # 0x80 'Se ', # 0x81 'Cheng ', # 0x82 'Jiong ', # 0x83 'Cheng ', # 0x84 'Hua ', # 0x85 'Jiao ', # 0x86 'Lao ', # 0x87 'Che ', # 0x88 'Gan ', # 0x89 'Cun ', # 0x8a 'Heng ', # 0x8b 'Si ', # 0x8c 'Shu ', # 0x8d 'Peng ', # 0x8e 'Han ', # 0x8f 'Yun ', # 0x90 'Liu ', # 0x91 'Hong ', # 0x92 'Fu ', # 0x93 'Hao ', # 0x94 'He ', # 0x95 'Xian ', # 0x96 'Jian ', # 0x97 'Shan ', # 0x98 'Xi ', # 0x99 'Oki ', # 0x9a '[?] ', # 0x9b 'Lan ', # 0x9c '[?] 
', # 0x9d 'Yu ', # 0x9e 'Lin ', # 0x9f 'Min ', # 0xa0 'Zao ', # 0xa1 'Dang ', # 0xa2 'Wan ', # 0xa3 'Ze ', # 0xa4 'Xie ', # 0xa5 'Yu ', # 0xa6 'Li ', # 0xa7 'Shi ', # 0xa8 'Xue ', # 0xa9 'Ling ', # 0xaa 'Man ', # 0xab 'Zi ', # 0xac 'Yong ', # 0xad 'Kuai ', # 0xae 'Can ', # 0xaf 'Lian ', # 0xb0 'Dian ', # 0xb1 'Ye ', # 0xb2 'Ao ', # 0xb3 'Huan ', # 0xb4 'Zhen ', # 0xb5 'Chan ', # 0xb6 'Man ', # 0xb7 'Dan ', # 0xb8 'Dan ', # 0xb9 'Yi ', # 0xba 'Sui ', # 0xbb 'Pi ', # 0xbc 'Ju ', # 0xbd 'Ta ', # 0xbe 'Qin ', # 0xbf 'Ji ', # 0xc0 'Zhuo ', # 0xc1 'Lian ', # 0xc2 'Nong ', # 0xc3 'Guo ', # 0xc4 'Jin ', # 0xc5 'Fen ', # 0xc6 'Se ', # 0xc7 'Ji ', # 0xc8 'Sui ', # 0xc9 'Hui ', # 0xca 'Chu ', # 0xcb 'Ta ', # 0xcc 'Song ', # 0xcd 'Ding ', # 0xce '[?] ', # 0xcf 'Zhu ', # 0xd0 'Lai ', # 0xd1 'Bin ', # 0xd2 'Lian ', # 0xd3 'Mi ', # 0xd4 'Shi ', # 0xd5 'Shu ', # 0xd6 'Mi ', # 0xd7 'Ning ', # 0xd8 'Ying ', # 0xd9 'Ying ', # 0xda 'Meng ', # 0xdb 'Jin ', # 0xdc 'Qi ', # 0xdd 'Pi ', # 0xde 'Ji ', # 0xdf 'Hao ', # 0xe0 'Ru ', # 0xe1 'Zui ', # 0xe2 'Wo ', # 0xe3 'Tao ', # 0xe4 'Yin ', # 0xe5 'Yin ', # 0xe6 'Dui ', # 0xe7 'Ci ', # 0xe8 'Huo ', # 0xe9 'Jing ', # 0xea 'Lan ', # 0xeb 'Jun ', # 0xec 'Ai ', # 0xed 'Pu ', # 0xee 'Zhuo ', # 0xef 'Wei ', # 0xf0 'Bin ', # 0xf1 'Gu ', # 0xf2 'Qian ', # 0xf3 'Xing ', # 0xf4 'Hama ', # 0xf5 'Kuo ', # 0xf6 'Fei ', # 0xf7 '[?] ', # 0xf8 'Boku ', # 0xf9 'Jian ', # 0xfa 'Wei ', # 0xfb 'Luo ', # 0xfc 'Zan ', # 0xfd 'Lu ', # 0xfe 'Li ', # 0xff )
apache-2.0
mahendra-r/edx-platform
cms/envs/test.py
15
9245
# -*- coding: utf-8 -*- """ This config file runs the simplest dev environment using sqlite, and db-based sessions. Assumes structure: /envroot/ /db # This is where it'll write the database file /edx-platform # The location of this repo /log # Where we're going to write log files """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import # Pylint gets confused by path.py instances, which report themselves as class # objects. As a result, pylint applies the wrong regex in validating names, # and throws spurious errors. Therefore, we disable invalid-name checking. # pylint: disable=invalid-name from .common import * import os from path import Path as path from warnings import filterwarnings, simplefilter from uuid import uuid4 # import settings from LMS for consistent behavior with CMS # pylint: disable=unused-import from lms.envs.test import ( WIKI_ENABLED, PLATFORM_NAME, SITE_NAME, DEFAULT_FILE_STORAGE, MEDIA_ROOT, MEDIA_URL, # This is practically unused but needed by the oauth2_provider package, which # some tests in common/ rely on. OAUTH_OIDC_ISSUER, ) # mongo connection settings MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost') THIS_UUID = uuid4().hex[:5] # Nose Test Runner TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' _SYSTEM = 'cms' _REPORT_DIR = REPO_ROOT / 'reports' / _SYSTEM _REPORT_DIR.makedirs_p() _NOSEID_DIR = REPO_ROOT / '.testids' / _SYSTEM _NOSEID_DIR.makedirs_p() NOSE_ARGS = [ '--id-file', _NOSEID_DIR / 'noseids', '--xunit-file', _REPORT_DIR / 'nosetests.xml', ] TEST_ROOT = path('test_root') # Want static files in the same dir for running on jenkins. 
STATIC_ROOT = TEST_ROOT / "staticfiles" GITHUB_REPO_ROOT = TEST_ROOT / "data" DATA_DIR = TEST_ROOT / "data" COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" # For testing "push to lms" FEATURES['ENABLE_EXPORT_GIT'] = True GIT_REPO_EXPORT_DIR = TEST_ROOT / "export_course_repos" # Makes the tests run much faster... SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead # TODO (cpennington): We need to figure out how envs/test.py can inject things into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", ] STATICFILES_DIRS += [ (course_dir, COMMON_TEST_DATA_ROOT / course_dir) for course_dir in os.listdir(COMMON_TEST_DATA_ROOT) if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir) ] # Avoid having to run collectstatic before the unit test suite # If we don't add these settings, then Django templates that can't # find pipelined assets will raise a ValueError. # http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage' STATIC_URL = "/static/" PIPELINE_ENABLED = False TENDER_DOMAIN = "help.edge.edx.org" TENDER_SUBDOMAIN = "edxedge" # Update module store settings per defaults for tests update_module_store_settings( MODULESTORE, module_store_options={ 'default_class': 'xmodule.raw_module.RawDescriptor', 'fs_root': TEST_ROOT / "data", }, doc_store_settings={ 'db': 'test_xmodule', 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'collection': 'test_modulestore{0}'.format(THIS_UUID), }, ) CONTENTSTORE = { 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore', 'DOC_STORE_CONFIG': { 'host': MONGO_HOST, 'db': 'test_xcontent', 'port': MONGO_PORT_NUM, 'collection': 'dont_trip', }, # allow for additional options that can be keyed on a name, e.g. 
'trashcan' 'ADDITIONAL_OPTIONS': { 'trashcan': { 'bucket': 'trash_fs' } } } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': TEST_ROOT / "db" / "cms.db", }, } LMS_BASE = "localhost:8000" FEATURES['PREVIEW_LMS_BASE'] = "preview" CACHES = { # This is the cache used for most things. Askbot will not work without a # functioning cache -- it relies on caching to load its settings in places. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'edx_loc_mem_cache', 'KEY_FUNCTION': 'util.memcache.safe_key', }, # The general cache is what you get if you use our util.cache. It's used for # things like caching the course.xml file for different A/B test groups. # We set it to be a DummyCache to force reloading of course.xml in dev. # In staging environments, we would grab VERSION from data uploaded by the # push process. 'general': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', }, 'mongo_metadata_inheritance': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': os.path.join(tempfile.gettempdir(), 'mongo_metadata_inheritance'), 'TIMEOUT': 300, 'KEY_FUNCTION': 'util.memcache.safe_key', }, 'loc_cache': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'edx_location_mem_cache', }, 'course_structure_cache': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, } # Add external_auth to Installed apps for testing INSTALLED_APPS += ('external_auth', ) # Add milestones to Installed apps for testing INSTALLED_APPS += ('milestones', 'openedx.core.djangoapps.call_stack_manager') # hide ratelimit warnings while running tests filterwarnings('ignore', message='No request passed to the backend, unable to rate-limit') # Ignore deprecation warnings (so we don't clutter Jenkins builds/production) # 
https://docs.python.org/2/library/warnings.html#the-warnings-filter # Change to "default" to see the first instance of each hit # or "error" to convert all into errors simplefilter('ignore') ################################# CELERY ###################################### CELERY_ALWAYS_EAGER = True CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend' ########################### Server Ports ################################### # These ports are carefully chosen so that if the browser needs to # access them, they will be available through the SauceLabs SSH tunnel LETTUCE_SERVER_PORT = 8003 XQUEUE_PORT = 8040 YOUTUBE_PORT = 8031 LTI_PORT = 8765 VIDEO_SOURCE_PORT = 8777 ################### Make tests faster # http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/ PASSWORD_HASHERS = ( 'django.contrib.auth.hashers.SHA1PasswordHasher', 'django.contrib.auth.hashers.MD5PasswordHasher', ) # dummy segment-io key SEGMENT_IO_KEY = '***REMOVED***' FEATURES['ENABLE_SERVICE_STATUS'] = True # Toggles embargo on for testing FEATURES['EMBARGO'] = True # set up some testing for microsites MICROSITE_CONFIGURATION = { "test_microsite": { "domain_prefix": "testmicrosite", "university": "test_microsite", "platform_name": "Test Microsite", "logo_image_url": "test_microsite/images/header-logo.png", "email_from_address": "test_microsite@edx.org", "payment_support_email": "test_microsite@edx.org", "ENABLE_MKTG_SITE": False, "SITE_NAME": "test_microsite.localhost", "course_org_filter": "TestMicrositeX", "course_about_show_social_links": False, "css_overrides_file": "test_microsite/css/test_microsite.css", "show_partners": False, "show_homepage_promo_video": False, "course_index_overlay_text": "This is a Test Microsite Overlay Text.", "course_index_overlay_logo_file": "test_microsite/images/header-logo.png", "homepage_overlay_html": "<h1>This is a Test Microsite Overlay HTML</h1>" }, "default": { "university": "default_university", "domain_prefix": "www", } } 
MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_microsites' FEATURES['USE_MICROSITES'] = True # For consistency in user-experience, keep the value of this setting in sync with # the one in lms/envs/test.py FEATURES['ENABLE_DISCUSSION_SERVICE'] = False # Enable a parental consent age limit for testing PARENTAL_CONSENT_AGE_LIMIT = 13 # Enable content libraries code for the tests FEATURES['ENABLE_CONTENT_LIBRARIES'] = True FEATURES['ENABLE_EDXNOTES'] = True # MILESTONES FEATURES['MILESTONES_APP'] = True # ENTRANCE EXAMS FEATURES['ENTRANCE_EXAMS'] = True ENTRANCE_EXAM_MIN_SCORE_PCT = 50 VIDEO_CDN_URL = { 'CN': 'http://api.xuetangx.com/edx/video?s3_url=' } # Courseware Search Index FEATURES['ENABLE_COURSEWARE_INDEX'] = True FEATURES['ENABLE_LIBRARY_INDEX'] = True SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine" # teams feature FEATURES['ENABLE_TEAMS'] = True # Dummy secret key for dev/test SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
agpl-3.0
adcomp/picota-html5-playground
pyqt5-webkit.py
1
2752
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# David Art [aka] ADcomp <david.madbox@gmail.com>
"""Minimal HTML5 playground: a QWebView with a JavaScript<->Python bridge."""

import os
import sys

from PyQt5.QtWidgets import (QMainWindow, QApplication, QInputDialog, QLineEdit, QMessageBox)
from PyQt5.QtCore import (QUrl, QObject, pyqtSlot, pyqtProperty)
from PyQt5.QtWebKitWidgets import (QWebView, QWebInspector)

__appname__ = "picota"
__author__ = "David Art (ADcomp)"
__license__ = "GNU GPL 2 or later"

# Directory containing this script; used to locate the bundled www/ pages.
REALPATH = os.path.dirname(os.path.realpath(__file__))


class JsObj(QObject):
    """Bridge object exposed to page JavaScript (published as ``window.picota``).

    The pyqtSlot methods are callable from page scripts.  Most delegate to
    ``self.mainapp.console`` -- presumably an interactive console owned by
    the main window; it is not defined in this file, verify against the
    full application.
    """

    def __init__(self, parent=None):
        QObject.__init__(self, parent)
        self.mainapp = parent
        self._output = ""
        self._editor = ""

    @pyqtSlot(str, result=str)
    def echo(self, text="what?"):
        """Round-trip helper: return the given text unchanged."""
        return str(text)

    @pyqtSlot(str, result=str)
    def autoComplete(self, text):
        # Delegates to the console's completion machinery.
        return self.mainapp.console.autoComplete(text)

    @pyqtSlot(str, result=str)
    def history(self, text):
        return self.mainapp.console._get_history(text)

    @pyqtSlot(str, result=str)
    def execute(self, text):
        return self.mainapp.console._executeLine(text)

    @pyqtSlot(str, result=str)
    def runcode(self, text):
        return self.mainapp.console._runcode(text)

    def _getOutput(self):
        return self._output

    # Read-only property visible from JavaScript.
    output = pyqtProperty(str, fget=_getOutput)


class WebView(QWebView):
    """Web view that loads the local playground page and installs the JS bridge."""

    def __init__(self, parent, realpath):
        super(WebView, self).__init__(parent)
        self.mainapp = parent
        # Create the object used to communicate with JavaScript and publish
        # it to the page's main frame under the name "picota".
        self.jsobj = JsObj(parent)
        self.page().mainFrame().addToJavaScriptWindowObject("picota", self.jsobj)
        # Backslashes are normalized so the file:// URL also works on Windows.
        self._home = "file:///%s/www/remote.html" % realpath.replace("\\", "/")
        self.gohome()
        # Web inspector (for debugging); shown on demand by the caller.
        settings = self.page().settings()
        settings.setAttribute(settings.DeveloperExtrasEnabled, True)
        self.inspector = QWebInspector()
        self.inspector.setPage(self.page())

    def gohome(self):
        """Load the bundled start page."""
        self.load(QUrl(self._home))

    def goto(self, url):
        """Load an arbitrary URL."""
        self.load(QUrl(url))

    def eval(self, text):
        """Evaluate *text* as JavaScript in the page's main frame."""
        # code = json.dumps(text)
        code = text
        return self.page().mainFrame().evaluateJavaScript(code)


class MainWindow(QMainWindow):
    """Main window (QMainWindow) hosting the web view."""

    def __init__(self):
        super(MainWindow, self).__init__()
        self.setWindowTitle("picota")
        self.webview = WebView(self, REALPATH)
        self.setCentralWidget(self.webview)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = MainWindow()
    window.resize(800, 600)
    window.show()
    # window.webview.inspector.show()
    sys.exit(app.exec_())
mit
gogobebe2/Replicating-DeepMind
libraries/cuda-convnet2/python_util/data.py
180
7803
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE: Python 2 code (xrange, old-style class).  `unpickle`, `alphanum_key`
# and `re` are expected to come from the star-import of `util` -- they are
# not defined in this file; TODO confirm against util.py.

import numpy as n
from numpy.random import randn, rand, random_integers
import os
from threading import Thread
from util import *

BATCH_META_FILE = "batches.meta"

class DataLoaderThread(Thread):
    """Loads one pickled (sub-)batch file in a background thread."""
    def __init__(self, path, tgt):
        Thread.__init__(self)
        self.path = path
        # Shared output list owned by the caller; run() appends into it.
        self.tgt = tgt

    def run(self):
        # += on a list is an in-place extend, so the caller's list object
        # receives the unpickled batch.
        self.tgt += [unpickle(self.path)]

class DataProvider:
    """Base class: cycles over numbered batch files found in a directory."""
    # Matches "data_batch_<num>" with an optional ".<sub>" sub-batch suffix.
    BATCH_REGEX = re.compile('^data_batch_(\d+)(\.\d+)?$')

    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        # NOTE(review): mutable default dp_params={} is shared across calls;
        # harmless only as long as it is never mutated.
        if batch_range == None:
            batch_range = DataProvider.get_batch_nums(data_dir)
        if init_batchnum is None or init_batchnum not in batch_range:
            init_batchnum = batch_range[0]

        self.data_dir = data_dir
        self.batch_range = batch_range
        self.curr_epoch = init_epoch
        self.curr_batchnum = init_batchnum
        self.dp_params = dp_params
        self.batch_meta = self.get_batch_meta(data_dir)
        self.data_dic = None
        self.test = test
        self.batch_idx = batch_range.index(init_batchnum)

    def get_next_batch(self):
        """Return (epoch, batchnum, data) for the current batch and advance."""
        # With a single batch in the range, the cached copy is reused forever.
        if self.data_dic is None or len(self.batch_range) > 1:
            self.data_dic = self.get_batch(self.curr_batchnum)
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        return epoch, batchnum, self.data_dic

    def get_batch(self, batch_num):
        """Load batch *batch_num*; a directory means a set of sub-batches."""
        fname = self.get_data_file_name(batch_num)
        if os.path.isdir(fname): # batch in sub-batches
            # Load every sub-batch file concurrently, one thread per file,
            # collecting results in sub-batch (alphanumeric) order.
            sub_batches = sorted(os.listdir(fname), key=alphanum_key)
            # print sub_batches
            num_sub_batches = len(sub_batches)
            tgts = [[] for i in xrange(num_sub_batches)]
            threads = [DataLoaderThread(os.path.join(fname, s), tgt) for (s, tgt) in zip(sub_batches, tgts)]
            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()
            return [t[0] for t in tgts]
        return unpickle(self.get_data_file_name(batch_num))

    def get_data_dims(self, idx=0):
        # Dimensionality of the data matrix (idx == 0); labels are 1-d.
        return self.batch_meta['num_vis'] if idx == 0 else 1

    def advance_batch(self):
        self.batch_idx = self.get_next_batch_idx()
        self.curr_batchnum = self.batch_range[self.batch_idx]
        if self.batch_idx == 0: # we wrapped
            self.curr_epoch += 1

    def get_next_batch_idx(self):
        return (self.batch_idx + 1) % len(self.batch_range)

    def get_next_batch_num(self):
        return self.batch_range[self.get_next_batch_idx()]

    # get filename of current batch
    def get_data_file_name(self, batchnum=None):
        if batchnum is None:
            batchnum = self.curr_batchnum
        return os.path.join(self.data_dir, 'data_batch_%d' % batchnum)

    @classmethod
    def get_instance(cls, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, type="default", dp_params={}, test=False):
        """Factory: construct the data provider registered under *type*.

        NOTE(review): the parameter name `type` shadows the builtin.
        """
        # DataProvider cannot be referenced inside its own class body, so the
        # 'default' entry is not pre-registered here.
        #cls.dp_classes['default'] = DataProvider
        type = type or DataProvider.get_batch_meta(data_dir)['dp_type'] # allow data to decide data provider
        if type.startswith("dummy-"):
            # "dummy-<kind>-<dims>": look up "dummy-<kind>-n" and pass <dims>.
            name = "-".join(type.split('-')[:-1]) + "-n"
            if name not in dp_types:
                raise DataProviderException("No such data provider: %s" % type)
            _class = dp_classes[name]
            dims = int(type.split('-')[-1])
            return _class(dims)
        elif type in dp_types:
            _class = dp_classes[type]
            return _class(data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

        raise DataProviderException("No such data provider: %s" % type)

    @classmethod
    def register_data_provider(cls, name, desc, _class):
        # dp_types / dp_classes are module-level registries defined below.
        if name in dp_types:
            raise DataProviderException("Data provider %s already registered" % name)
        dp_types[name] = desc
        dp_classes[name] = _class

    @staticmethod
    def get_batch_meta(data_dir):
        return unpickle(os.path.join(data_dir, BATCH_META_FILE))

    @staticmethod
    def get_batch_filenames(srcdir):
        return sorted([f for f in os.listdir(srcdir) if DataProvider.BATCH_REGEX.match(f)], key=alphanum_key)

    @staticmethod
    def get_batch_nums(srcdir):
        # Unique integer batch ids; sub-batches collapse to their parent id.
        names = DataProvider.get_batch_filenames(srcdir)
        return sorted(list(set(int(DataProvider.BATCH_REGEX.match(n).group(1)) for n in names)))

    @staticmethod
    def get_num_batches(srcdir):
        return len(DataProvider.get_batch_nums(srcdir))

class DummyDataProvider(DataProvider):
    """Provider that fabricates random data; needs no files on disk."""
    def __init__(self, data_dim):
        # Deliberately does NOT call DataProvider.__init__ (no data_dir).
        #self.data_dim = data_dim
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim, 'data_in_rows':True}
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx = 0

    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        # 512 random cases per batch, single precision.
        data = rand(512, self.get_data_dims()).astype(n.single)
        return self.curr_epoch, self.curr_batchnum, {'data':data}

class LabeledDataProvider(DataProvider):
    """DataProvider whose batch metadata carries class label names."""
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params={}, test=False):
        DataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

    def get_num_classes(self):
        return len(self.batch_meta['label_names'])

class LabeledDummyDataProvider(DummyDataProvider):
    """Dummy provider that also fabricates (and caches) random labels."""
    def __init__(self, data_dim, num_classes=10, num_cases=7):
        #self.data_dim = data_dim
        self.batch_range = [1]
        self.batch_meta = {'num_vis': data_dim,
                           'label_names': [str(x) for x in range(num_classes)],
                           'data_in_rows':True}
        self.num_cases = num_cases
        self.num_classes = num_classes
        self.curr_epoch = 1
        self.curr_batchnum = 1
        self.batch_idx=0
        self.data = None

    def get_num_classes(self):
        return self.num_classes

    def get_next_batch(self):
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        if self.data is None:
            # Generate once and cache, so every epoch sees the same batch.
            data = rand(self.num_cases, self.get_data_dims()).astype(n.single) # <--changed to rand
            labels = n.require(n.c_[random_integers(0,self.num_classes-1,self.num_cases)], requirements='C', dtype=n.single)
            self.data, self.labels = data, labels
        else:
            data, labels = self.data, self.labels
        # print data.shape, labels.shape
        return self.curr_epoch, self.curr_batchnum, [data.T, labels.T ]

# Registry of built-in provider types; extended via register_data_provider.
dp_types = {"dummy-n": "Dummy data provider for n-dimensional data",
            "dummy-labeled-n": "Labeled dummy data provider for n-dimensional data"}
dp_classes = {"dummy-n": DummyDataProvider,
              "dummy-labeled-n": LabeledDummyDataProvider}

class DataProviderException(Exception):
    pass
gpl-3.0
PeterWangIntel/chromium-crosswalk
ppapi/native_client/tools/browser_tester/browsertester/rpclistener.py
170
2297
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys
import time


class RPCListener(object):
    """Receives status callbacks from the browser-test server.

    Tracks whether any run has ever failed and consults the shutdown
    callback to decide whether the browser should be closed once testing
    finishes.  RPC handlers reply with a status string for the server.
    """

    def __init__(self, shutdown_callback):
        self.shutdown_callback = shutdown_callback
        self.prefix = '|||| '
        self.ever_failed = False
        self.start_time = time.time()

    def Log(self, message):
        """Write *message* to stdout, one time-stamped line per input line."""
        # Stamp each line with milliseconds since startup; this gives extra
        # data when debugging bot behavior.
        elapsed_ms = int((time.time() - self.start_time) * 1000)
        stamp = '[%6s ms] ' % elapsed_ms + self.prefix
        stamped = ['%s%s\n' % (stamp, line.rstrip())
                   for line in message.split('\n')]
        sys.stdout.write(''.join(stamped))

    def TestLog(self, message):
        """Log a message coming from the test; always succeeds."""
        self.Log(message)
        return 'OK'

    def ServerError(self, message):
        """Record a fatal server-side problem.  Only called locally."""
        self.Log('\n[SERVER_ERROR] %s' % (message,))
        self.ever_failed = True
        self._TestingDone()
        return 'OK'

    def Ping(self):
        # No-op: the server resets its timeout on every GET it receives,
        # so this call exists purely to prevent timeouts.
        return 'OK'

    def JavaScriptIsAlive(self):
        # Succeeds automatically as long as the renderer has not crashed.
        return 'OK'

    def Shutdown(self, message, passed):
        """Handle the end-of-test notification and return the reply string.

        Biased towards failure: anything other than the exact string 'true'
        (case-insensitive) counts as a failed run, so garbage from the test
        runner is treated as a failure.  In interactive mode this may fire
        several times; ever_failed is sticky, so one failing run taints the
        final exit code.
        """
        self.Log(message)
        if passed.lower() != 'true':
            self.ever_failed = True
        return 'Die, please' if self._TestingDone() else 'OK'

    def _TestingDone(self):
        # The callback returns whether the browser should be closed.
        return self.shutdown_callback()
bsd-3-clause
qaenablers/python-behavetutorial
features/dict/steps/create.py
1
1333
# -*- coding: utf-8 -*-

import behave
from behave import step
from hamcrest import assert_that, is_, equal_to, is_not, is_in

import logging

# Change the parameter matcher used in parsing step text: parse, cfparse, re
# https://pythonhosted.org/behave/api.html#step-parameters
behave.use_step_matcher("re")

__logger__ = logging.getLogger("qa.create")


@step(u'a configured python environment')
def configured_python_environment(context):
    """Log the configured environment and the dict prepared by environment.py."""
    __logger__.info(" >> Environment: '%s'", context.config.userdata['environment'])
    __logger__.info(" >> Dict value in 'Given' step: '%s'", context.dict)


@step(u'I create a new dict with key with this values')
def create_new_dict(context):
    """Populate context.dict from the step's table of key/value rows."""
    for row in context.table:
        context.dict.update({row["key"]: row["value"]})
    __logger__.info(" >> Dict value in 'Then' step (after execution): '%s'", context.dict)


@step(u'the dict is created')
def the_dict_is_created(context):
    """Assert that context.dict really is a dict."""
    assert_that(isinstance(context.dict, dict), is_(True),
                "The dict has not been created")


@step(u'the key "(?P<key>.*)" has the value "(?P<value>.*)"')
def the_length_is(context, key, value):
    """Assert that *key* is present in context.dict with exactly *value*.

    Fix: the first assertion message read "The is not in the dict" (missing
    its subject) and neither message named the offending key, making
    failures hard to diagnose.
    """
    assert_that(key, is_in(context.dict),
                "The key '%s' is not in the dict" % key)
    assert_that(context.dict[key], is_(equal_to(value)),
                "The value of key '%s' is not the expected one" % key)
apache-2.0
bowen0701/algorithms_data_structures
lc0222_count_complete_tree_nodes.py
1
4019
"""Leetcode 222. Count Complete Tree Nodes Medium URL: https://leetcode.com/problems/count-complete-tree-nodes/ Given a complete binary tree, count the number of nodes. Note: Definition of a complete binary tree from Wikipedia: In a complete binary tree every level, except possibly the last, is completely filled, and all nodes in the last level are as far left as possible. It can have between 1 and 2h nodes inclusive at the last level h. Example: Input: 1 / \ 2 3 / \ / 4 5 6 Output: 6 """ # Definition for a binary tree node. class TreeNode(object): def __init__(self, val): self.val = val self.left = None self.right = None class SolutionPreorderRecur(object): def _preorderRecur(self, root): if not root: return None self.n_nodes += 1 self._preorderRecur(root.left) self._preorderRecur(root.right) def countNodes(self, root): """ :type root: TreeNode :rtype: int Time complexity: O(n). Space complexity: O(logn) for balanced tree; O(n) for single sided. """ # Apply recursive preorder traversal. self.n_nodes = 0 self._preorderRecur(root) return self.n_nodes class SolutionPreorderIter(object): def countNodes(self, root): """ :type root: TreeNode :rtype: int Time complexity: O(n). Space complexity: O(logn) for balanced tree; O(n) for single sided. """ if not root: return 0 n_nodes = 0 stack = [root] while stack: current = stack.pop() n_nodes += 1 if current.right: stack.append(current.right) if current.left: stack.append(current.left) return n_nodes class SolutionLevelorderIter(object): def countNodes(self, root): """ :type root: TreeNode :rtype: int Time complexity: O(n). Space complexity: O(logn) for balanced tree; O(n) for single sided. 
""" from collections import deque queue = deque([root]) n_nodes = 0 while queue: for i in range(len(queue)): current = queue.pop() n_nodes += 1 if current.left: queue.appendleft(current.left) if current.right: queue.appendleft(current.right) return n_nodes class SolutionLeftRightDepths(object): def _countLeftDepth(self, root): if not root: return 0 return 1 + self._countLeftDepth(root.left) def countNodes(self, root): """ :type root: TreeNode :rtype: int Time complexity: O((logn)^2) for balanced tree; O(n^2) for single sided. Space complexity: O(logn) for balanced tree; O(n) for single sided. """ if not root: return 0 # Compare left & right subtrees's depths. left_depth = self._countLeftDepth(root.left) right_depth = self._countLeftDepth(root.right) if left_depth == right_depth: # If left & right depths are equal, left subtree is full. return pow(2, left_depth) + self.countNodes(root.right) else: # If not, right subtree is full, and left depth is bigger. return self.countNodes(root.left) + pow(2, right_depth) def main(): # Input: # 1 # / \ # 2 3 # / \ / # 4 5 6 # Output: 6 root = TreeNode(1) root.left = TreeNode(2) root.right = TreeNode(3) root.left.left = TreeNode(4) root.left.right = TreeNode(5) root.right.left = TreeNode(6) print SolutionPreorderRecur().countNodes(root) print SolutionPreorderIter().countNodes(root) print SolutionLevelorderIter().countNodes(root) print SolutionLeftRightDepths().countNodes(root) if __name__ == '__main__': main()
bsd-2-clause
RO-ny9/python-for-android
python-modules/twisted/twisted/internet/iocpreactor/tcp.py
49
20271
# Copyright (c) 2008-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ TCP support for IOCP reactor """ import socket, operator, errno, struct from zope.interface import implements, directlyProvides from twisted.internet import interfaces, error, address, main, defer from twisted.internet.abstract import isIPAddress from twisted.internet.tcp import _SocketCloser, Connector as TCPConnector from twisted.python import log, failure, reflect, util from twisted.internet.iocpreactor import iocpsupport as _iocp, abstract from twisted.internet.iocpreactor.interfaces import IReadWriteHandle from twisted.internet.iocpreactor.const import ERROR_IO_PENDING from twisted.internet.iocpreactor.const import SO_UPDATE_CONNECT_CONTEXT from twisted.internet.iocpreactor.const import SO_UPDATE_ACCEPT_CONTEXT from twisted.internet.iocpreactor.const import ERROR_CONNECTION_REFUSED from twisted.internet.iocpreactor.const import ERROR_NETWORK_UNREACHABLE try: from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol except ImportError: TLSMemoryBIOProtocol = TLSMemoryBIOFactory = None _extraInterfaces = () else: _extraInterfaces = (interfaces.ITLSTransport,) # ConnectEx returns these. XXX: find out what it does for timeout connectExErrors = { ERROR_CONNECTION_REFUSED: errno.WSAECONNREFUSED, ERROR_NETWORK_UNREACHABLE: errno.WSAENETUNREACH, } class _BypassTLS(object): """ L{_BypassTLS} is used as the transport object for the TLS protocol object used to implement C{startTLS}. Its methods skip any TLS logic which C{startTLS} enables. @ivar _connection: A L{Connection} which TLS has been started on which will be proxied to by this object. Any method which has its behavior altered after C{startTLS} will be skipped in favor of the base class's implementation. This allows the TLS protocol object to have direct access to the transport, necessary to actually implement TLS. 
""" def __init__(self, connection): self._connection = connection def __getattr__(self, name): return getattr(self._connection, name) def write(self, data): return abstract.FileHandle.write(self._connection, data) def writeSequence(self, iovec): return abstract.FileHandle.writeSequence(self._connection, iovec) def loseConnection(self, reason=None): return abstract.FileHandle.loseConnection(self._connection, reason) class Connection(abstract.FileHandle, _SocketCloser): """ @ivar _tls: C{False} to indicate the connection is in normal TCP mode, C{True} to indicate that TLS has been started and that operations must be routed through the L{TLSMemoryBIOProtocol} instance. @ivar _tlsClientDefault: A flag which must be set by a subclass. If set to C{True}, L{startTLS} will default to initiating SSL as a client. If set to C{False}, L{startTLS} will default to initiating SSL as a server. """ implements(IReadWriteHandle, interfaces.ITCPTransport, interfaces.ISystemHandle, *_extraInterfaces) _tls = False def __init__(self, sock, proto, reactor=None): abstract.FileHandle.__init__(self, reactor) self.socket = sock self.getFileHandle = sock.fileno self.protocol = proto def getHandle(self): return self.socket def dataReceived(self, rbuffer): # XXX: some day, we'll have protocols that can handle raw buffers self.protocol.dataReceived(str(rbuffer)) def readFromHandle(self, bufflist, evt): return _iocp.recv(self.getFileHandle(), bufflist, evt) def writeToHandle(self, buff, evt): return _iocp.send(self.getFileHandle(), buff, evt) def _closeWriteConnection(self): try: getattr(self.socket, self._socketShutdownMethod)(1) except socket.error: pass p = interfaces.IHalfCloseableProtocol(self.protocol, None) if p: try: p.writeConnectionLost() except: f = failure.Failure() log.err() self.connectionLost(f) def readConnectionLost(self, reason): p = interfaces.IHalfCloseableProtocol(self.protocol, None) if p: try: p.readConnectionLost() except: log.err() self.connectionLost(failure.Failure()) 
else: self.connectionLost(reason) def connectionLost(self, reason): abstract.FileHandle.connectionLost(self, reason) self._closeSocket() protocol = self.protocol del self.protocol del self.socket del self.getFileHandle protocol.connectionLost(reason) def logPrefix(self): """ Return the prefix to log with when I own the logging thread. """ return self.logstr def getTcpNoDelay(self): return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)) def setTcpNoDelay(self, enabled): self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled) def getTcpKeepAlive(self): return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE)) def setTcpKeepAlive(self, enabled): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled) if TLSMemoryBIOFactory is not None: def startTLS(self, contextFactory, normal=True): """ @see: L{ITLSTransport.startTLS} """ # Figure out which direction the SSL goes in. If normal is True, # we'll go in the direction indicated by the subclass. Otherwise, # we'll go the other way (client = not normal ^ _tlsClientDefault, # in other words). if normal: client = self._tlsClientDefault else: client = not self._tlsClientDefault tlsFactory = TLSMemoryBIOFactory(contextFactory, client, None) tlsProtocol = TLSMemoryBIOProtocol(tlsFactory, self.protocol, False) self.protocol = tlsProtocol self.getHandle = tlsProtocol.getHandle self.getPeerCertificate = tlsProtocol.getPeerCertificate # Mark the transport as secure. directlyProvides(self, interfaces.ISSLTransport) # Remember we did this so that write and writeSequence can send the # data to the right place. self._tls = True # Hook it up self.protocol.makeConnection(_BypassTLS(self)) def write(self, data): """ Write some data, either directly to the underlying handle or, if TLS has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and send. 
@see: L{ITCPTransport.write} """ if self._tls: self.protocol.write(data) else: abstract.FileHandle.write(self, data) def writeSequence(self, iovec): """ Write some data, either directly to the underlying handle or, if TLS has been started, to the L{TLSMemoryBIOProtocol} for it to encrypt and send. @see: L{ITCPTransport.writeSequence} """ if self._tls: self.protocol.writeSequence(iovec) else: abstract.FileHandle.writeSequence(self, iovec) def loseConnection(self, reason=None): """ Close the underlying handle or, if TLS has been started, first shut it down. @see: L{ITCPTransport.loseConnection} """ if self._tls: if self.connected and not self.disconnecting: self.protocol.loseConnection() else: abstract.FileHandle.loseConnection(self, reason) class Client(Connection): addressFamily = socket.AF_INET socketType = socket.SOCK_STREAM _tlsClientDefault = True def __init__(self, host, port, bindAddress, connector, reactor): self.connector = connector self.addr = (host, port) self.reactor = reactor # ConnectEx documentation says socket _has_ to be bound if bindAddress is None: bindAddress = ('', 0) try: try: skt = reactor.createSocket(self.addressFamily, self.socketType) except socket.error, se: raise error.ConnectBindError(se[0], se[1]) else: try: skt.bind(bindAddress) except socket.error, se: raise error.ConnectBindError(se[0], se[1]) self.socket = skt Connection.__init__(self, skt, None, reactor) reactor.callLater(0, self.resolveAddress) except error.ConnectBindError, err: reactor.callLater(0, self.failIfNotConnected, err) def resolveAddress(self): if isIPAddress(self.addr[0]): self._setRealAddress(self.addr[0]) else: d = self.reactor.resolve(self.addr[0]) d.addCallbacks(self._setRealAddress, self.failIfNotConnected) def _setRealAddress(self, address): self.realAddress = (address, self.addr[1]) self.doConnect() def failIfNotConnected(self, err): if (self.connected or self.disconnected or not hasattr(self, "connector")): return try: self._closeSocket() except 
AttributeError: pass else: del self.socket, self.getFileHandle self.reactor.removeActiveHandle(self) self.connector.connectionFailed(failure.Failure(err)) del self.connector def stopConnecting(self): """ Stop attempt to connect. """ self.failIfNotConnected(error.UserError()) def cbConnect(self, rc, bytes, evt): if rc: rc = connectExErrors.get(rc, rc) self.failIfNotConnected(error.getConnectError((rc, errno.errorcode.get(rc, 'Unknown error')))) else: self.socket.setsockopt(socket.SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, struct.pack('I', self.socket.fileno())) self.protocol = self.connector.buildProtocol(self.getPeer()) self.connected = True self.logstr = self.protocol.__class__.__name__+",client" self.protocol.makeConnection(self) self.startReading() def doConnect(self): if not hasattr(self, "connector"): # this happens if we connector.stopConnecting in # factory.startedConnecting return assert _iocp.have_connectex self.reactor.addActiveHandle(self) evt = _iocp.Event(self.cbConnect, self) rc = _iocp.connect(self.socket.fileno(), self.realAddress, evt) if rc == ERROR_IO_PENDING: return else: evt.ignore = True self.cbConnect(rc, 0, 0, evt) def getHost(self): """ Returns an IPv4Address. This indicates the address from which I am connecting. """ return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',))) def getPeer(self): """ Returns an IPv4Address. This indicates the address that I am connected to. """ return address.IPv4Address('TCP', *(self.realAddress + ('INET',))) def __repr__(self): s = ('<%s to %s at %x>' % (self.__class__, self.addr, util.unsignedID(self))) return s def connectionLost(self, reason): if not self.connected: self.failIfNotConnected(error.ConnectError(string=reason)) else: Connection.connectionLost(self, reason) self.connector.connectionLost(reason) class Server(Connection): """ Serverside socket-stream connection class. I am a serverside network connection transport; a socket which came from an accept() on a server. 
""" _tlsClientDefault = False def __init__(self, sock, protocol, clientAddr, serverAddr, sessionno, reactor): """ Server(sock, protocol, client, server, sessionno) Initialize me with a socket, a protocol, a descriptor for my peer (a tuple of host, port describing the other end of the connection), an instance of Port, and a session number. """ Connection.__init__(self, sock, protocol, reactor) self.serverAddr = serverAddr self.clientAddr = clientAddr self.sessionno = sessionno self.logstr = "%s,%s,%s" % (self.protocol.__class__.__name__, sessionno, self.clientAddr.host) self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__, self.sessionno, self.serverAddr.port) self.connected = True self.startReading() def __repr__(self): """ A string representation of this connection. """ return self.repstr def getHost(self): """ Returns an IPv4Address. This indicates the server's address. """ return self.serverAddr def getPeer(self): """ Returns an IPv4Address. This indicates the client's address. """ return self.clientAddr class Connector(TCPConnector): def _makeTransport(self): return Client(self.host, self.port, self.bindAddress, self, self.reactor) class Port(_SocketCloser): implements(interfaces.IListeningPort) connected = False disconnected = False disconnecting = False addressFamily = socket.AF_INET socketType = socket.SOCK_STREAM sessionno = 0 maxAccepts = 100 # Actual port number being listened on, only set to a non-None # value when we are actually listening. 
_realPortNumber = None def __init__(self, port, factory, backlog=50, interface='', reactor=None): self.port = port self.factory = factory self.backlog = backlog self.interface = interface self.reactor = reactor def __repr__(self): if self._realPortNumber is not None: return "<%s of %s on %s>" % (self.__class__, self.factory.__class__, self._realPortNumber) else: return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__) def startListening(self): try: skt = self.reactor.createSocket(self.addressFamily, self.socketType) # TODO: resolve self.interface if necessary skt.bind((self.interface, self.port)) except socket.error, le: raise error.CannotListenError, (self.interface, self.port, le) self.addrLen = _iocp.maxAddrLen(skt.fileno()) # Make sure that if we listened on port 0, we update that to # reflect what the OS actually assigned us. self._realPortNumber = skt.getsockname()[1] log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber)) self.factory.doStart() skt.listen(self.backlog) self.connected = True self.disconnected = False self.reactor.addActiveHandle(self) self.socket = skt self.getFileHandle = self.socket.fileno self.doAccept() def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)): """ Stop accepting connections on this port. This will shut down my socket and call self.connectionLost(). It returns a deferred which will fire successfully when the port is actually closed. """ self.disconnecting = True if self.connected: self.deferred = defer.Deferred() self.reactor.callLater(0, self.connectionLost, connDone) return self.deferred stopListening = loseConnection def _logConnectionLostMsg(self): """ Log message for closing port """ log.msg('(TCP Port %s Closed)' % (self._realPortNumber,)) def connectionLost(self, reason): """ Cleans up the socket. 
""" self._logConnectionLostMsg() self._realPortNumber = None d = None if hasattr(self, "deferred"): d = self.deferred del self.deferred self.disconnected = True self.reactor.removeActiveHandle(self) self.connected = False self._closeSocket() del self.socket del self.getFileHandle try: self.factory.doStop() except: self.disconnecting = False if d is not None: d.errback(failure.Failure()) else: raise else: self.disconnecting = False if d is not None: d.callback(None) def logPrefix(self): """ Returns the name of my class, to prefix log entries with. """ return reflect.qual(self.factory.__class__) def getHost(self): """ Returns an IPv4Address. This indicates the server's address. """ return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',))) def cbAccept(self, rc, bytes, evt): self.handleAccept(rc, evt) if not (self.disconnecting or self.disconnected): self.doAccept() def handleAccept(self, rc, evt): if self.disconnecting or self.disconnected: return False # possible errors: # (WSAEMFILE, WSAENOBUFS, WSAENFILE, WSAENOMEM, WSAECONNABORTED) if rc: log.msg("Could not accept new connection -- %s (%s)" % (errno.errorcode.get(rc, 'unknown error'), rc)) return False else: evt.newskt.setsockopt(socket.SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, struct.pack('I', self.socket.fileno())) family, lAddr, rAddr = _iocp.get_accept_addrs(evt.newskt.fileno(), evt.buff) assert family == self.addressFamily protocol = self.factory.buildProtocol( address._ServerFactoryIPv4Address('TCP', rAddr[0], rAddr[1])) if protocol is None: evt.newskt.close() else: s = self.sessionno self.sessionno = s+1 transport = Server(evt.newskt, protocol, address.IPv4Address('TCP', rAddr[0], rAddr[1], 'INET'), address.IPv4Address('TCP', lAddr[0], lAddr[1], 'INET'), s, self.reactor) protocol.makeConnection(transport) return True def doAccept(self): numAccepts = 0 while 1: evt = _iocp.Event(self.cbAccept, self) # see AcceptEx documentation evt.buff = buff = _iocp.AllocateReadBuffer(2 * (self.addrLen + 
16)) evt.newskt = newskt = self.reactor.createSocket(self.addressFamily, self.socketType) rc = _iocp.accept(self.socket.fileno(), newskt.fileno(), buff, evt) if (rc == ERROR_IO_PENDING or (not rc and numAccepts >= self.maxAccepts)): break else: evt.ignore = True if not self.handleAccept(rc, evt): break numAccepts += 1
apache-2.0
dudanogueira/microerp
microerp/cadastro/management/commands/importar_clientes.py
1
7403
# -*- coding: utf-8 -*- from django.core.management.base import BaseCommand from cadastro.models import Cliente, Cidade, Ramo, Bairro from optparse import make_option import os, csv, datetime from django.utils.encoding import smart_unicode, smart_str class Command(BaseCommand): help = ''' Sincroniza para a base de clientes uma planilha conforme: CODIGO,NOME,FANTASIA,CONTATO,TELEFONE,CELULAR,CNPJ,CPF,IE,RG,ENDERECO,COMPLEMENTO,BAIRRO,NUMERO,CEP,CIDADE,UF,EMAIL,FIS_JUR,TIPO,COD_CONVENIO,CONVENIO,ULTIMA_VENDA,NASCIMENTO,DATA_CADASTRO,A_RECEBER,ATRASADO,RECEBIDO,LIMITE_CREDITO,CONCEDER_CREDITO,ATIVO ''' args = "--file arquivo.csv," option_list = BaseCommand.option_list + ( make_option('--file', action='store_true', dest='arquivo', help='Importa uma lista de Clientes em formato CSV', ), ) def handle(self, *args, **options): arquivo = options.get('arquivo') if options['arquivo']: f = args[0] try: if os.path.isfile(f): reader = csv.DictReader(open(f, 'r')) for row in reader: print '-'*50 id_referencia = int(row['CODIGO']) cidade = str(row['CIDADE']) or u"Não informado" estado = str(row['UF']) nome_bairro = str(row['BAIRRO']) or u"Não informado" nome_cliente = str(row['NOME']) nome_fantasia = str(row['FANTASIA']) email = str(row['EMAIL']) # data cadastro data_cadastro = datetime.datetime.strptime(str(row['DATA_CADASTRO']), "%d.%m.%Y") nascimento = str(row['NASCIMENTO']) if nascimento == '': nascimento = None else: nascimento = datetime.datetime.strptime(nascimento, "%d.%m.%Y") contato = str(row['CONTATO']) fantasia = str(row['FANTASIA']) rua = str(row['ENDERECO']) complemento = str(row['COMPLEMENTO']) numero = str(row['NUMERO']) cep = str(row['CEP']) tipo_cliente = str(row['FIS_JUR']) telefone = str(row['TELEFONE']) or None celular = str(row['CELULAR']) or None conceder_credito = str(row['CONCEDER_CREDITO']) or None limite_credito = str(row['LIMITE_CREDITO']) or 0 ie = str(row['IE']) or None rg = str(row['RG']) or None try: ativo = str(row['ATIVO']) except: ativo = "Sim" 
print "ID LEGADO:",id_referencia print "NOME CLIENTE:",nome_cliente # DEFINE CIDADE cidade,created = Cidade.objects.get_or_create( nome=cidade, estado=estado ) cidade_nome = smart_unicode(cidade.nome) if created: print "CIDADE *CRIADA*:",cidade_nome else: print "CIDADE:",cidade_nome # DEFINE BAIRRO bairro = Bairro.objects.filter( cidade=cidade, nome=nome_bairro ).first() if bairro: print "BAIRRO:",nome_bairro else: print "BAIRRO *CRIADO*:",nome_bairro bairro = Bairro.objects.create( cidade=cidade, nome=nome_bairro ) # DEFINE TIPO DE CLIENTE print "TIPO:",tipo_cliente if tipo_cliente[0] == u"F": tipo = 'pf' cpf = str(row['CPF']) print "CPF:",cpf else: tipo = 'pj' cnpj = str(row['CNPJ']) print "CNPJ",cnpj # DEFINO ATIVO OU NAO if ativo.lower() == "sim": cliente_ativo = True else: cliente_ativo = False print "ATIVO", ativo # DEFINE CREDITO if conceder_credito.lower() == "sim": conceder_credito = True else: conceder_credito = False print "CONCEDER CREDITO", conceder_credito print "LIMITE DE CREDITO", limite_credito # DEFINE CLIENTE try: cliente = Cliente.objects.get( id_referencia=id_referencia ) print "CLIENTE %s Já importado. Atualizando" % cliente except Cliente.DoesNotExist: print "NOVO CLIENTE!" 
cliente = Cliente.objects.create( id_referencia=id_referencia ) cliente.nome = nome_cliente cliente.fantasia = nome_fantasia cliente.nascimento = nascimento cliente.tipo = tipo cliente.email = email cliente.contato = contato cliente.criado = data_cadastro cliente.ativo = cliente_ativo cliente.conceder_credito = conceder_credito cliente.limite_credito = limite_credito if cliente.tipo == 'pj': cliente.cnpj = cnpj else: cliente.cpf = cpf cliente.rg = rg cliente.inscricao_estadual = ie print "CONTATO",contato print "TELEFONE:",telefone print "CELULAR:",celular cliente.telefone_fixo = telefone cliente.telefone_celular = celular # CRIAR ENDEREÇO telelefone_associado = telefone or celular endereco,created = cliente.enderecocliente_set.get_or_create(bairro=bairro, rua=rua, numero=numero, telefone=telelefone_associado, cep=cep, complemento=complemento) if created: print "ENDERECO CRIADO" #cliente.clean() cliente.save() except: raise else: print self.help print self.args
lgpl-3.0
huntxu/neutron
neutron/auth.py
3
1694
# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging from oslo_middleware import base import webob.dec import webob.exc LOG = logging.getLogger(__name__) class NeutronKeystoneContext(base.ConfigurableMiddleware): """Make a request context from keystone headers.""" @webob.dec.wsgify def __call__(self, req): ctx = context.Context.from_environ(req.environ) if not ctx.user_id: LOG.debug("X_USER_ID is not found in request") return webob.exc.HTTPUnauthorized() # Inject the context... req.environ['neutron.context'] = ctx return self.application def pipeline_factory(loader, global_conf, **local_conf): """Create a paste pipeline based on the 'auth_strategy' config option.""" pipeline = local_conf[cfg.CONF.auth_strategy] pipeline = pipeline.split() filters = [loader.get_filter(n) for n in pipeline[:-1]] app = loader.get_app(pipeline[-1]) filters.reverse() for filter in filters: app = filter(app) return app
apache-2.0
jindongh/kombu
kombu/transport/pyro.py
40
2468
""" kombu.transport.pyro ====================== Pyro transport. Requires the :mod:`Pyro4` library to be installed. """ from __future__ import absolute_import import sys from kombu.five import reraise from kombu.utils import cached_property from . import virtual try: import Pyro4 as pyro from Pyro4.errors import NamingError except ImportError: # pragma: no cover pyro = NamingError = None # noqa DEFAULT_PORT = 9090 E_LOOKUP = """\ Unable to locate pyro nameserver {0.virtual_host} on host {0.hostname}\ """ class Channel(virtual.Channel): def queues(self): return self.shared_queues.get_queue_names() def _new_queue(self, queue, **kwargs): if queue not in self.queues(): self.shared_queues.new_queue(queue) def _get(self, queue, timeout=None): queue = self._queue_for(queue) msg = self.shared_queues._get(queue) return msg def _queue_for(self, queue): if queue not in self.queues(): self.shared_queues.new_queue(queue) return queue def _put(self, queue, message, **kwargs): queue = self._queue_for(queue) self.shared_queues._put(queue, message) def _size(self, queue): return self.shared_queues._size(queue) def _delete(self, queue, *args): self.shared_queues._delete(queue) def _purge(self, queue): return self.shared_queues._purge(queue) def after_reply_message_received(self, queue): pass @cached_property def shared_queues(self): return self.connection.shared_queues class Transport(virtual.Transport): Channel = Channel #: memory backend state is global. 
state = virtual.BrokerState() default_port = DEFAULT_PORT driver_type = driver_name = 'pyro' def _open(self): conninfo = self.client pyro.config.HMAC_KEY = conninfo.virtual_host try: nameserver = pyro.locateNS(host=conninfo.hostname, port=self.default_port) # name of registered pyro object uri = nameserver.lookup(conninfo.virtual_host) return pyro.Proxy(uri) except NamingError: reraise(NamingError, NamingError(E_LOOKUP.format(conninfo)), sys.exc_info()[2]) def driver_version(self): return pyro.__version__ @cached_property def shared_queues(self): return self._open()
bsd-3-clause
taknira/certificate-transparency
python/utilities/log_list/print_log_list.py
3
4188
#!/usr/bin/env python """Parse and print the list of logs, after validating signature.""" import base64 import hashlib import json import os import sys import time import gflags import jsonschema import M2Crypto from cpp_generator import generate_cpp_header from java_generator import generate_java_source from openssl_generator import generate_openssl_conf FLAGS = gflags.FLAGS gflags.DEFINE_string("log_list", None, "Logs list file to parse and print.") gflags.MarkFlagAsRequired("log_list") gflags.DEFINE_string("signature", None, "Signature file over the list of logs.") gflags.DEFINE_string("signer_key", None, "Public key of the log list signer.") gflags.DEFINE_string("log_list_schema", os.path.join(os.path.dirname(sys.argv[0]), "data", "log_list_schema.json"), "JSON schema for the list of logs.") gflags.DEFINE_string("header_output", None, "If specifed, generates C++ code for Chromium.") gflags.DEFINE_string("java_output", None, "If specifed, generates Java code.") gflags.DEFINE_string("java_class", "org.conscrypt.ct.KnownLogs", "Fully qualified name of the generated class.") gflags.DEFINE_string("openssl_output", None, "If specified, generates a CONF file for OpenSSL.") gflags.DEFINE_boolean("skip_signature_check", False, "Skip signature check (only validate schema).") def is_log_list_valid(json_log_list, schema_file): try: jsonschema.validate( json_log_list, json.load(open(schema_file, "rb"))) return True except jsonschema.exceptions.ValidationError as e: print e return False return False def is_signature_valid(log_list_data, signature_file, public_key_file): loaded_pubkey = M2Crypto.RSA.load_pub_key(public_key_file) pubkey = M2Crypto.EVP.PKey() pubkey.assign_rsa(loaded_pubkey) pubkey.reset_context(md="sha256") pubkey.verify_init() pubkey.verify_update(log_list_data) return pubkey.verify_final(open(signature_file, "rb").read()) def print_formatted_log_list(json_log_list): operator_id_to_name = dict( [(o["id"], o["name"]) for o in json_log_list["operators"]]) for 
log_info in json_log_list["logs"]: print "%s:" % log_info["description"] log_operators = [ operator_id_to_name[i].encode("utf-8") for i in log_info["operated_by"]] print " Operated by %s and has MMD of %f hours" % ( ", ".join(log_operators), log_info["maximum_merge_delay"] / (60.0 ** 2)) print " At: %s" % (log_info["url"]) key = base64.decodestring(log_info["key"]) hasher = hashlib.sha256() hasher.update(key) key_hash = hasher.digest() print " Key ID: %s" % (base64.encodestring(key_hash)), if "final_sth" in log_info: final_sth = log_info["final_sth"] print " Log is frozen as of %s, final tree size %d" % ( time.asctime(time.gmtime(final_sth["timestamp"] / 1000.0)), final_sth["tree_size"]) print "-" * 80 def run(): with open(FLAGS.log_list, "rb") as f: json_data = f.read() if (not FLAGS.skip_signature_check) and not is_signature_valid( json_data, FLAGS.signature, FLAGS.signer_key): print "ERROR: Signature over list of logs is not valid, not proceeding." sys.exit(1) parsed_json = json.loads(json_data) if not is_log_list_valid(parsed_json, FLAGS.log_list_schema): print "ERROR: Log list is signed but does not conform to the schema." sys.exit(2) if FLAGS.header_output: generate_cpp_header(parsed_json, FLAGS.header_output) if FLAGS.java_output: generate_java_source(parsed_json, FLAGS.java_output, FLAGS.java_class) if FLAGS.openssl_output: generate_openssl_conf(parsed_json, FLAGS.openssl_output) if not FLAGS.header_output and not FLAGS.java_output: print_formatted_log_list(parsed_json) if __name__ == "__main__": sys.argv = FLAGS(sys.argv) run()
apache-2.0
MikeAmy/django
tests/auth_tests/models/custom_permissions.py
33
1381
""" The CustomPermissionsUser users email as the identifier, but uses the normal Django permissions model. This allows us to check that the PermissionsMixin includes everything that is needed to interact with the ModelBackend. """ from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin from django.contrib.auth.tests.custom_user import ( CustomUserManager, RemoveGroupsAndPermissions, ) from django.db import models from django.utils.encoding import python_2_unicode_compatible class CustomPermissionsUserManager(CustomUserManager): def create_superuser(self, email, password, date_of_birth): u = self.create_user(email, password=password, date_of_birth=date_of_birth) u.is_superuser = True u.save(using=self._db) return u with RemoveGroupsAndPermissions(): @python_2_unicode_compatible class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin): email = models.EmailField(verbose_name='email address', max_length=255, unique=True) date_of_birth = models.DateField() custom_objects = CustomPermissionsUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['date_of_birth'] def get_full_name(self): return self.email def get_short_name(self): return self.email def __str__(self): return self.email
bsd-3-clause
sparkslabs/kamaelia
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Apps/JMB/WSGI/__init__.py
6
7203
# -*- coding: utf-8 -*- # Needed to allow import # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """ WSGI Handler ============= NOTE: This is experimental software. It has not been fully tested and will probably break or behave in unexpected ways. This is the WSGI handler for ServerCore. It will wait on the HTTPParser to transmit the body in full before proceeding. Thus, it is probably not a good idea to use any WSGI apps requiring a lot of large file uploads (although it could theoretically function fairly well for that purpose as long as the concurrency level is relatively low). For more information on WSGI, what it is, and to get a general overview of what this component is intended to adapt the ServerCore to do, see one of the following links: * http://www.python.org/dev/peps/pep-0333/ (PEP 333) * http://www.wsgi.org/wsgi/ (WsgiStart wiki) * http://en.wikipedia.org/wiki/Web_Server_Gateway_Interface (Wikipedia article on WSGI) ------------- Dependencies ------------- This component depends on the wsgiref module, which is included with python 2.5. Thus if you're using an older version, you will need to install it before using this component. 
The easiest way to install wsgiref is to use easy_install, which may be downloaded from http://peak.telecommunity.com/DevCenter/EasyInstall . You may then install wsgiref using the command "sudo easy_install wsgiref" (without the quotes of course). ------------------ Factory Functions ------------------ The WSGI Handler may currently be instantiated in two ways: using SimpleWSGIFactory and using WSGIFactory. Use SimpleWSGIFactory if you would like to create a WSGI Handler but know that you will only use one WSGI Application for that handler. WSGIFactory is a more advanced factory function that will use built in URL handling to look up WSGI Applications. SimpleWSGIFactory ~~~~~~~~~~~~~~~~~~ Creates a WSGI Handler that can handle only one WSGI Application. WSGIConfig - see the WsgiConfig section below app_object - The WSGI application object to run error_log - The file to store errors in logger_name - The name of the python logger to log errors to WSGIFactory ~~~~~~~~~~~~ Creates a WSGI Handler using url routing. WSGIConfig - see the WSGIConfig section below url_list - A URL list to look up App objects. 
It must contain three keys: kp.regex - the regex to match the uri against (will only match the first section) kp.import_path - The path to import the WSGI application object from kp.app_object - the attribute of the module named in kp.import_path that names the WSGI application object error_log - The file to store errors in logger_name - The name of the python logger to log errors to -------------------------------- How do I use SimpleWSGIFactory -------------------------------- To make a WSGI system using SimpleWSGIFactory, use the following: import socket import Axon from Kamaelia.Protocol.HTTP.Handlers.WSGI import SimpleWSGIFactory from Kamaelia.Chassis.ConnectedServer import ServerCore from Kamaelia.Protocol.HTTP import ErrorPages from Kamaelia.Support.Protocol.HTTP import HTTPProtocol from Kamaelia.Apps.WSGI.Simple import simple_app port = 8080 #This is just a configuration dictionary for general WSGI stuff. This needs to be passed to the handler #to run WsgiConfig ={ 'server_software' : "Simple Example WSGI Web Server", 'server_admin' : "Jason Baker", 'wsgi_ver' : (1,0), } def main(): #This line is so that the HTTPRequestHandler knows what component to route requests to. routing = [ ['/simple', SimpleWSGIFactory(WsgiConfig, simple_app)] ] server = ServerCore(protocol=HTTPProtocol(routing), port=port, socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)) print 'Serving on port %s' % (port) server.run() if __name__ == '__main__': main() ----------------------------- How do I use WSGIFactory? ----------------------------- Here is an example of how to create a simple WSGI server using WSGIFactory: from Kamaelia.Support.Protocol.HTTP import HTTPProtocol from Kamaelia.Protocol.HTTP.Handlers.WSGI import WSGIFactory WsgiConfig = { 'wsgi_ver' : (1, 0), 'server_admin' : 'Jason Baker', 'server_software' : 'Kamaelia Publish' } url_list = [ #Note that this is a list of dictionaries. Order is important. 
{ 'kp.regex' : 'simple', 'kp.import_path' : 'Kamaelia.Apps.WSGI.Simple', 'kp.app_obj' : 'simple_app', } { 'kp.regex' : '.*', #The .* means that this is a 404 handler 'kp.import_path' : 'Kamaelia.Apps.WSGI.ErrorHandler', 'kp.app_obj' : 'application', } ] routing = [['/', WsgiFactory(WsgiConfig, url_list)]] ServerCore( protocol=HTTPProtocol(routing), port=8080, socketOptions=(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)).run() ------------------ Internal overview ------------------ request object ~~~~~~~~~~~~~~~ Note that certain WSGI applications will require configuration data from the urls file. If you use the WSGIFactory to run this handler, all options specified in the urls list will be put into the environment variable with a kp. in front of them. For example, the 'regex' entry in a urls file would go into the environ dictionary like this if it was set to 'simple': { ... 'kp.regex' : 'simple', ... } wsgi.input ~~~~~~~~~~~ PEP 333 requires that the WSGI environ dictionary also contain a file-like object that holds the body of the request. Currently, the WsgiHandler will wait for the full request before starting the application (which is not optimal behavior). If the method is not PUT or POST, the handler will use a pre-made null-file object that will always return empty data. This is an optimization to lower peak memory usage and to speed things up. WSGIConfig ~~~~~~~~~~~ The WSGI Handler requires a WSGIConfig dictonary for general configuration info. The following items are required to be defined: * wsgi_ver: the WSGI version as a Tuple. You want to use (1, 0) * server_admin: the name and/or email address of the server's administrator * server_software: The software and/or software version that runs the server FIXME: It would be nice if the WsgiConfig were made into an object rather than a dictionary. """ from _factory import WSGIFactory, SimpleWSGIFactory
apache-2.0
Tripwire/TARDIS
dependencies.py
6
1998
try: import subprocess except ImportError: print "No subprocess!" try: import json except ImportError: print "No json!" try: import os except ImportError: print "No os!" try: import re except ImportError: print "No re!" try: import sys except ImportError: print "No sys!" try: import datetime except ImportError: print "No datetime!" try: import socket except ImportError: print "No socket!" try: import struct except ImportError: print "No struct!" try: import urllib2 except ImportError: print "No urllib2!" try: import pxssh except ImportError: print "No pexpext/pxssh, try pip install pexpect" try: import csv except ImportError: print "No csv!" try: import shutil except ImportError: print "No shutil!" try: import argparse except ImportError: print "No argparse, try pip install argparse" try: import base64 except ImportError: print "No base64!" try: import cookielib except ImportError: print "No cookielib!" try: import email except ImportError: print "No email!" try: import requests except ImportError: print "No requests, try pip install requests" try: import xml.etree.ElementTree as ET except ImportError: print "No xml etree!" try: from collections import defaultdict except ImportError: print "No collections defaultdict!" try: import dateutil.parser except ImportError: print "No dateutil parser, try pip install python-dateutil" try: import mysql.connector except ImportError: print "No mysql connector, try pip install --allow-external mysql-connector-python mysql-connector-python" try: from elasticsearch import Elasticsearch except ImportError: print "No elasticsearch, try pip install elasticsearch" try: import splunklib.client as client except ImportError: print "No splunk, try pip install splunk-sdk" try: from stix.core import STIXPackage except ImportError: print "No STIX, try pip intsall stix" try: from cybox.objects.win_event_log_object import WinEventLog except ImportError: print "No TAXI, try pip intsall stix"
apache-2.0
StackStorm/st2
st2common/st2common/persistence/auth.py
3
3430
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from st2common.exceptions.auth import ( TokenNotFoundError, ApiKeyNotFoundError, UserNotFoundError, AmbiguousUserError, NoNicknameOriginProvidedError, ) from st2common.models.db import MongoDBAccess from st2common.models.db.auth import UserDB, TokenDB, ApiKeyDB from st2common.persistence.base import Access from st2common.util import hash as hash_utils class User(Access): impl = MongoDBAccess(UserDB) @classmethod def get(cls, username): return cls.get_by_name(username) @classmethod def get_by_nickname(cls, nickname, origin): if not origin: raise NoNicknameOriginProvidedError() result = cls.query(**{("nicknames__%s" % origin): nickname}) if not result.first(): raise UserNotFoundError() if result.count() > 1: raise AmbiguousUserError() return result.first() @classmethod def _get_impl(cls): return cls.impl @classmethod def _get_by_object(cls, object): # For User name is unique. 
name = getattr(object, "name", "") return cls.get_by_name(name) class Token(Access): impl = MongoDBAccess(TokenDB) @classmethod def _get_impl(cls): return cls.impl @classmethod def add_or_update(cls, model_object, publish=True, validate=True): if not getattr(model_object, "user", None): raise ValueError("User is not provided in the token.") if not getattr(model_object, "token", None): raise ValueError("Token value is not set.") if not getattr(model_object, "expiry", None): raise ValueError("Token expiry is not provided in the token.") return super(Token, cls).add_or_update( model_object, publish=publish, validate=validate ) @classmethod def get(cls, value): result = cls.query(token=value).first() if not result: raise TokenNotFoundError() return result class ApiKey(Access): impl = MongoDBAccess(ApiKeyDB) @classmethod def _get_impl(cls): return cls.impl @classmethod def get(cls, value): # DB does not contain key but the key_hash. value_hash = hash_utils.hash(value) result = cls.query(key_hash=value_hash).first() if not result: raise ApiKeyNotFoundError("ApiKey with key_hash=%s not found." % value_hash) return result @classmethod def get_by_key_or_id(cls, value): try: return cls.get(value) except ApiKeyNotFoundError: pass try: return cls.get_by_id(value) except: raise ApiKeyNotFoundError("ApiKey with key or id=%s not found." % value)
apache-2.0
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.5.0/Lib/test/test_list.py
7
4041
import sys from test import support, list_tests import pickle class ListTest(list_tests.CommonTest): type2test = list def test_basic(self): self.assertEqual(list([]), []) l0_3 = [0, 1, 2, 3] l0_3_bis = list(l0_3) self.assertEqual(l0_3, l0_3_bis) self.assertTrue(l0_3 is not l0_3_bis) self.assertEqual(list(()), []) self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3]) self.assertEqual(list(''), []) self.assertEqual(list('spam'), ['s', 'p', 'a', 'm']) if sys.maxsize == 0x7fffffff: # This test can currently only work on 32-bit machines. # XXX If/when PySequence_Length() returns a ssize_t, it should be # XXX re-enabled. # Verify clearing of bug #556025. # This assumes that the max data size (sys.maxint) == max # address size this also assumes that the address size is at # least 4 bytes with 8 byte addresses, the bug is not well # tested # # Note: This test is expected to SEGV under Cygwin 1.3.12 or # earlier due to a newlib bug. See the following mailing list # thread for the details: # http://sources.redhat.com/ml/newlib/2002/msg00369.html self.assertRaises(MemoryError, list, range(sys.maxsize // 2)) # This code used to segfault in Py2.4a3 x = [] x.extend(-y for y in x) self.assertEqual(x, []) def test_truth(self): super().test_truth() self.assertTrue(not []) self.assertTrue([42]) def test_identity(self): self.assertTrue([] is not []) def test_len(self): super().test_len() self.assertEqual(len([]), 0) self.assertEqual(len([0]), 1) self.assertEqual(len([0, 1, 2]), 3) def test_overflow(self): lst = [4, 5, 6, 7] n = int((sys.maxsize*2+2) // len(lst)) def mul(a, b): return a * b def imul(a, b): a *= b self.assertRaises((MemoryError, OverflowError), mul, lst, n) self.assertRaises((MemoryError, OverflowError), imul, lst, n) def test_repr_large(self): # Check the repr of large list objects def check(n): l = [0] * n s = repr(l) self.assertEqual(s, '[' + ', '.join(['0'] * n) + ']') check(10) # check our checking code check(1000000) def test_iterator_pickle(self): # Userlist 
iterators don't support pickling yet since # they are based on generators. data = self.type2test([4, 5, 6, 7]) for proto in range(pickle.HIGHEST_PROTOCOL + 1): it = itorg = iter(data) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(data)) it = pickle.loads(d) next(it) d = pickle.dumps(it, proto) self.assertEqual(self.type2test(it), self.type2test(data)[1:]) def test_reversed_pickle(self): data = self.type2test([4, 5, 6, 7]) for proto in range(pickle.HIGHEST_PROTOCOL + 1): it = itorg = reversed(data) d = pickle.dumps(it, proto) it = pickle.loads(d) self.assertEqual(type(itorg), type(it)) self.assertEqual(self.type2test(it), self.type2test(reversed(data))) it = pickle.loads(d) next(it) d = pickle.dumps(it, proto) self.assertEqual(self.type2test(it), self.type2test(reversed(data))[1:]) def test_no_comdat_folding(self): # Issue 8847: In the PGO build, the MSVC linker's COMDAT folding # optimization causes failures in code that relies on distinct # function addresses. class L(list): pass with self.assertRaises(TypeError): (3,) + L([1,2]) if __name__ == "__main__": unittest.main()
mit
keiranFTW/sony-kernel-msm8660
tools/perf/scripts/python/futex-contention.py
11261
1486
# futex contention # (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com> # Licensed under the terms of the GNU GPL License version 2 # # Translation of: # # http://sourceware.org/systemtap/wiki/WSFutexContention # # to perf python scripting. # # Measures futex contention import os, sys sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from Util import * process_names = {} thread_thislock = {} thread_blocktime = {} lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time process_names = {} # long-lived pid-to-execname mapping def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, nr, uaddr, op, val, utime, uaddr2, val3): cmd = op & FUTEX_CMD_MASK if cmd != FUTEX_WAIT: return # we don't care about originators of WAKE events process_names[tid] = comm thread_thislock[tid] = uaddr thread_blocktime[tid] = nsecs(s, ns) def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, nr, ret): if thread_blocktime.has_key(tid): elapsed = nsecs(s, ns) - thread_blocktime[tid] add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed) del thread_blocktime[tid] del thread_thislock[tid] def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): for (tid, lock) in lock_waits: min, max, avg, count = lock_waits[tid, lock] print "%s[%d] lock %x contended %d times, %d avg ns" % \ (process_names[tid], tid, lock, count, avg)
gpl-2.0
destijl/forensicartifacts
frontend/thirdparty/networkx-1.9/networkx/algorithms/tests/test_simple_paths.py
42
2372
#!/usr/bin/env python
from nose.tools import *
import networkx as nx

def test_all_simple_paths():
    G = nx.path_graph(4)
    paths = nx.all_simple_paths(G, 0, 3)
    assert_equal(list(list(p) for p in paths), [[0, 1, 2, 3]])

def test_all_simple_paths_cutoff():
    G = nx.complete_graph(4)
    paths = nx.all_simple_paths(G, 0, 1, cutoff=1)
    assert_equal(list(list(p) for p in paths), [[0, 1]])
    paths = nx.all_simple_paths(G, 0, 1, cutoff=2)
    assert_equal(list(list(p) for p in paths), [[0, 1], [0, 2, 1], [0, 3, 1]])

def test_all_simple_paths_multigraph():
    # Parallel edges must each yield a path.
    G = nx.MultiGraph([(1, 2), (1, 2)])
    paths = nx.all_simple_paths(G, 1, 2)
    assert_equal(list(list(p) for p in paths), [[1, 2], [1, 2]])

def test_all_simple_paths_multigraph_with_cutoff():
    G = nx.MultiGraph([(1, 2), (1, 2), (1, 10), (10, 2)])
    paths = nx.all_simple_paths(G, 1, 2, cutoff=1)
    assert_equal(list(list(p) for p in paths), [[1, 2], [1, 2]])

def test_all_simple_paths_directed():
    G = nx.DiGraph()
    G.add_path([1, 2, 3])
    G.add_path([3, 2, 1])
    paths = nx.all_simple_paths(G, 1, 3)
    assert_equal(list(list(p) for p in paths), [[1, 2, 3]])

def test_all_simple_paths_empty():
    G = nx.path_graph(4)
    paths = nx.all_simple_paths(G, 0, 3, cutoff=2)
    assert_equal(list(list(p) for p in paths), [])

def hamiltonian_path(G, source):
    # Helper: yield every simple path from ``source`` that visits all
    # len(G) nodes, i.e. every Hamiltonian path rooted at ``source``.
    # FIX: ``source`` was previously overwritten with
    # next(G.nodes_iter()), silently ignoring the caller's argument; the
    # parameter is now honored. (The existing caller passes node 0, which
    # is also the first node of complete_graph(4), so its behavior is
    # unchanged.)
    neighbors = set(G[source]) - set([source])
    n = len(G)
    for target in neighbors:
        for path in nx.all_simple_paths(G, source, target):
            if len(path) == n:
                yield path

def test_hamiltonian_path():
    from itertools import permutations
    G = nx.complete_graph(4)
    paths = [list(p) for p in hamiltonian_path(G, 0)]
    exact = [[0] + list(p) for p in permutations([1, 2, 3], 3)]
    assert_equal(sorted(paths), sorted(exact))

def test_cutoff_zero():
    G = nx.complete_graph(4)
    paths = nx.all_simple_paths(G, 0, 3, cutoff=0)
    assert_equal(list(list(p) for p in paths), [])
    paths = nx.all_simple_paths(nx.MultiGraph(G), 0, 3, cutoff=0)
    assert_equal(list(list(p) for p in paths), [])

@raises(nx.NetworkXError)
def test_source_missing():
    G = nx.Graph()
    G.add_path([1, 2, 3])
    paths = list(nx.all_simple_paths(nx.MultiGraph(G), 0, 3))

@raises(nx.NetworkXError)
def test_target_missing():
    G = nx.Graph()
    G.add_path([1, 2, 3])
    paths = list(nx.all_simple_paths(nx.MultiGraph(G), 1, 4))
apache-2.0
ryfeus/lambda-packs
Selenium_PhantomJS/source/rsa/__init__.py
79
1476
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RSA module

Module for calculating large primes, and RSA encryption, decryption, signing
and verification. Includes generating public and private keys.

WARNING: this implementation does not use random padding, compression of the
cleartext input to prevent repetitions, or other common security improvements.
Use with care.

"""

# Re-export the package's public API from its submodules.
from rsa.key import newkeys, PrivateKey, PublicKey
from rsa.pkcs1 import (
    encrypt,
    decrypt,
    sign,
    verify,
    DecryptionError,
    VerificationError,
)

__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
__date__ = "2016-03-29"
__version__ = '3.4.2'

# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

__all__ = [
    "newkeys",
    "encrypt",
    "decrypt",
    "sign",
    "verify",
    "PublicKey",
    "PrivateKey",
    "DecryptionError",
    "VerificationError",
]
mit
chidea/GoPythonDLLWrapper
bin/lib/site-packages/pip/_vendor/distlib/util.py
54
51868
# # Copyright (C) 2012-2016 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # import codecs from collections import deque import contextlib import csv from glob import iglob as std_iglob import io import json import logging import os import py_compile import re import shutil import socket import ssl import subprocess import sys import tarfile import tempfile import textwrap try: import threading except ImportError: import dummy_threading as threading import time from . import DistlibException from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib, xmlrpclib, splittype, HTTPHandler, HTTPSHandler as BaseHTTPSHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, match_hostname, CertificateError, ZipFile) logger = logging.getLogger(__name__) # # Requirement parsing code for name + optional constraints + optional extras # # e.g. 'foo >= 1.2, < 2.0 [bar, baz]' # # The regex can seem a bit hairy, so we build it up out of smaller pieces # which are manageable. # COMMA = r'\s*,\s*' COMMA_RE = re.compile(COMMA) IDENT = r'(\w|[.-])+' EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')' VERSPEC = IDENT + r'\*?' 
RELOP = '([<>=!~]=)|[<>]' # # The first relop is optional - if absent, will be taken as '~=' # BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' + RELOP + r')\s*(' + VERSPEC + '))*') DIRECT_REF = '(from\s+(?P<diref>.*))' # # Either the bare constraints or the bare constraints in parentheses # CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF + r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)') EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*' EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]' REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' + CONSTRAINTS + ')?$') REQUIREMENT_RE = re.compile(REQUIREMENT) # # Used to scan through the constraints # RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')' RELOP_IDENT_RE = re.compile(RELOP_IDENT) def parse_requirement(s): def get_constraint(m): d = m.groupdict() return d['op'], d['vn'] result = None m = REQUIREMENT_RE.match(s) if m: d = m.groupdict() name = d['dn'] cons = d['c1'] or d['c2'] if not d['diref']: url = None else: # direct reference cons = None url = d['diref'].strip() if not cons: cons = None constr = '' rs = d['dn'] else: if cons[0] not in '<>!=': cons = '~=' + cons iterator = RELOP_IDENT_RE.finditer(cons) cons = [get_constraint(m) for m in iterator] rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons])) if not d['ex']: extras = None else: extras = COMMA_RE.split(d['ex']) result = Container(name=name, constraints=cons, extras=extras, requirement=rs, source=s, url=url) return result def get_resources_dests(resources_root, rules): """Find destinations for resources files""" def get_rel_path(base, path): # normalizes and returns a lstripped-/-separated path base = base.replace(os.path.sep, '/') path = path.replace(os.path.sep, '/') assert path.startswith(base) return path[len(base):].lstrip('/') destinations = {} for base, suffix, dest in rules: prefix = os.path.join(resources_root, base) for abs_base in iglob(prefix): 
abs_glob = os.path.join(abs_base, suffix) for abs_path in iglob(abs_glob): resource_file = get_rel_path(resources_root, abs_path) if dest is None: # remove the entry if it was here destinations.pop(resource_file, None) else: rel_path = get_rel_path(abs_base, abs_path) rel_dest = dest.replace(os.path.sep, '/').rstrip('/') destinations[resource_file] = rel_dest + '/' + rel_path return destinations def in_venv(): if hasattr(sys, 'real_prefix'): # virtualenv venvs result = True else: # PEP 405 venvs result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix) return result def get_executable(): # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as # changes to the stub launcher mean that sys.executable always points # to the stub on OS X # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' # in os.environ): # result = os.environ['__PYVENV_LAUNCHER__'] # else: # result = sys.executable # return result return os.path.normcase(sys.executable) def proceed(prompt, allowed_chars, error_prompt=None, default=None): p = prompt while True: s = raw_input(p) p = prompt if not s and default: s = default if s: c = s[0].lower() if c in allowed_chars: break if error_prompt: p = '%c: %s\n%s' % (c, error_prompt, prompt) return c def extract_by_key(d, keys): if isinstance(keys, string_types): keys = keys.split() result = {} for key in keys: if key in d: result[key] = d[key] return result def read_exports(stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) # Try to load as JSON, falling back on legacy format data = stream.read() stream = StringIO(data) try: jdata = json.load(stream) result = jdata['extensions']['python.exports']['exports'] for group, entries in result.items(): for k, v in entries.items(): s = '%s = %s' % (k, v) entry = get_export_entry(s) assert entry is not None entries[k] = entry return result except Exception: stream.seek(0, 0) def read_stream(cp, stream): if hasattr(cp, 'read_file'): 
cp.read_file(stream) else: cp.readfp(stream) cp = configparser.ConfigParser() try: read_stream(cp, stream) except configparser.MissingSectionHeaderError: stream.close() data = textwrap.dedent(data) stream = StringIO(data) read_stream(cp, stream) result = {} for key in cp.sections(): result[key] = entries = {} for name, value in cp.items(key): s = '%s = %s' % (name, value) entry = get_export_entry(s) assert entry is not None #entry.dist = self entries[name] = entry return result def write_exports(exports, stream): if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getwriter('utf-8')(stream) cp = configparser.ConfigParser() for k, v in exports.items(): # TODO check k, v for valid values cp.add_section(k) for entry in v.values(): if entry.suffix is None: s = entry.prefix else: s = '%s:%s' % (entry.prefix, entry.suffix) if entry.flags: s = '%s [%s]' % (s, ', '.join(entry.flags)) cp.set(k, entry.name, s) cp.write(stream) @contextlib.contextmanager def tempdir(): td = tempfile.mkdtemp() try: yield td finally: shutil.rmtree(td) @contextlib.contextmanager def chdir(d): cwd = os.getcwd() try: os.chdir(d) yield finally: os.chdir(cwd) @contextlib.contextmanager def socket_timeout(seconds=15): cto = socket.getdefaulttimeout() try: socket.setdefaulttimeout(seconds) yield finally: socket.setdefaulttimeout(cto) class cached_property(object): def __init__(self, func): self.func = func #for attr in ('__name__', '__module__', '__doc__'): # setattr(self, attr, getattr(func, attr, None)) def __get__(self, obj, cls=None): if obj is None: return self value = self.func(obj) object.__setattr__(obj, self.func.__name__, value) #obj.__dict__[self.func.__name__] = value = self.func(obj) return value def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. The path is split on '/' and put back together again using the current directory separator. 
Needed because filenames in the setup script are always supplied in Unix style, and have to be converted to the local convention before we can actually use them in the filesystem. Raises ValueError on non-Unix-ish systems if 'pathname' either starts or ends with a slash. """ if os.sep == '/': return pathname if not pathname: return pathname if pathname[0] == '/': raise ValueError("path '%s' cannot be absolute" % pathname) if pathname[-1] == '/': raise ValueError("path '%s' cannot end with '/'" % pathname) paths = pathname.split('/') while os.curdir in paths: paths.remove(os.curdir) if not paths: return os.curdir return os.path.join(*paths) class FileOperator(object): def __init__(self, dry_run=False): self.dry_run = dry_run self.ensured = set() self._init_record() def _init_record(self): self.record = False self.files_written = set() self.dirs_created = set() def record_as_written(self, path): if self.record: self.files_written.add(path) def newer(self, source, target): """Tell if the target is newer than the source. Returns true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Returns false if both exist and 'target' is the same age or younger than 'source'. Raise PackagingFileError if 'source' does not exist. Note that this test is not very accurate: files created in the same second will have the same "age". """ if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime def copy_file(self, infile, outfile, check=True): """Copy a file respecting dry-run and force flags. 
""" self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying %s to %s', infile, outfile) if not self.dry_run: msg = None if check: if os.path.islink(outfile): msg = '%s is a symlink' % outfile elif os.path.exists(outfile) and not os.path.isfile(outfile): msg = '%s is a non-regular file' % outfile if msg: raise ValueError(msg + ' which would be overwritten') shutil.copyfile(infile, outfile) self.record_as_written(outfile) def copy_stream(self, instream, outfile, encoding=None): assert not os.path.isdir(outfile) self.ensure_dir(os.path.dirname(outfile)) logger.info('Copying stream %s to %s', instream, outfile) if not self.dry_run: if encoding is None: outstream = open(outfile, 'wb') else: outstream = codecs.open(outfile, 'w', encoding=encoding) try: shutil.copyfileobj(instream, outstream) finally: outstream.close() self.record_as_written(outfile) def write_binary_file(self, path, data): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data) self.record_as_written(path) def write_text_file(self, path, data, encoding): self.ensure_dir(os.path.dirname(path)) if not self.dry_run: with open(path, 'wb') as f: f.write(data.encode(encoding)) self.record_as_written(path) def set_mode(self, bits, mask, files): if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'): # Set the executable bits (owner, group, and world) on # all the files specified. 
for f in files: if self.dry_run: logger.info("changing mode of %s", f) else: mode = (os.stat(f).st_mode | bits) & mask logger.info("changing mode of %s to %o", f, mode) os.chmod(f, mode) set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f) def ensure_dir(self, path): path = os.path.abspath(path) if path not in self.ensured and not os.path.exists(path): self.ensured.add(path) d, f = os.path.split(path) self.ensure_dir(d) logger.info('Creating %s' % path) if not self.dry_run: os.mkdir(path) if self.record: self.dirs_created.add(path) def byte_compile(self, path, optimize=False, force=False, prefix=None): dpath = cache_from_source(path, not optimize) logger.info('Byte-compiling %s to %s', path, dpath) if not self.dry_run: if force or self.newer(path, dpath): if not prefix: diagpath = None else: assert path.startswith(prefix) diagpath = path[len(prefix):] py_compile.compile(path, dpath, diagpath, True) # raise error self.record_as_written(dpath) return dpath def ensure_removed(self, path): if os.path.exists(path): if os.path.isdir(path) and not os.path.islink(path): logger.debug('Removing directory tree at %s', path) if not self.dry_run: shutil.rmtree(path) if self.record: if path in self.dirs_created: self.dirs_created.remove(path) else: if os.path.islink(path): s = 'link' else: s = 'file' logger.debug('Removing %s %s', s, path) if not self.dry_run: os.remove(path) if self.record: if path in self.files_written: self.files_written.remove(path) def is_writable(self, path): result = False while not result: if os.path.exists(path): result = os.access(path, os.W_OK) break parent = os.path.dirname(path) if parent == path: break path = parent return result def commit(self): """ Commit recorded changes, turn off recording, return changes. 
""" assert self.record result = self.files_written, self.dirs_created self._init_record() return result def rollback(self): if not self.dry_run: for f in list(self.files_written): if os.path.exists(f): os.remove(f) # dirs should all be empty now, except perhaps for # __pycache__ subdirs # reverse so that subdirs appear before their parents dirs = sorted(self.dirs_created, reverse=True) for d in dirs: flist = os.listdir(d) if flist: assert flist == ['__pycache__'] sd = os.path.join(d, flist[0]) os.rmdir(sd) os.rmdir(d) # should fail if non-empty self._init_record() def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] else: mod = __import__(module_name) if dotted_path is None: result = mod else: parts = dotted_path.split('.') result = getattr(mod, parts.pop(0)) for p in parts: result = getattr(result, p) return result class ExportEntry(object): def __init__(self, name, prefix, suffix, flags): self.name = name self.prefix = prefix self.suffix = suffix self.flags = flags @cached_property def value(self): return resolve(self.prefix, self.suffix) def __repr__(self): return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, self.suffix, self.flags) def __eq__(self, other): if not isinstance(other, ExportEntry): result = False else: result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and self.flags == other.flags) return result __hash__ = object.__hash__ ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+) \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? 
''', re.VERBOSE) def get_export_entry(specification): m = ENTRY_RE.search(specification) if not m: result = None if '[' in specification or ']' in specification: raise DistlibException('Invalid specification ' '%r' % specification) else: d = m.groupdict() name = d['name'] path = d['callable'] colons = path.count(':') if colons == 0: prefix, suffix = path, None else: if colons != 1: raise DistlibException('Invalid specification ' '%r' % specification) prefix, suffix = path.split(':') flags = d['flags'] if flags is None: if '[' in specification or ']' in specification: raise DistlibException('Invalid specification ' '%r' % specification) flags = [] else: flags = [f.strip() for f in flags.split(',')] result = ExportEntry(name, prefix, suffix, flags) return result def get_cache_base(suffix=None): """ Return the default base location for distlib caches. If the directory does not exist, it is created. Use the suffix provided for the base directory, and default to '.distlib' if it isn't provided. On Windows, if LOCALAPPDATA is defined in the environment, then it is assumed to be a directory, and will be the parent directory of the result. On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home directory - using os.expanduser('~') - will be the parent directory of the result. The result is just the directory '.distlib' in the parent directory as determined above, or with the name specified with ``suffix``. 
""" if suffix is None: suffix = '.distlib' if os.name == 'nt' and 'LOCALAPPDATA' in os.environ: result = os.path.expandvars('$localappdata') else: # Assume posix, or old Windows result = os.path.expanduser('~') # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if os.path.isdir(result): usable = os.access(result, os.W_OK) if not usable: logger.warning('Directory exists but is not writable: %s', result) else: try: os.makedirs(result) usable = True except OSError: logger.warning('Unable to create %s', result, exc_info=True) usable = False if not usable: result = tempfile.mkdtemp() logger.warning('Default location unusable, using %s', result) return os.path.join(result, suffix) def path_to_cache_dir(path): """ Convert an absolute path to a directory name for use in a cache. The algorithm used is: #. On Windows, any ``':'`` in the drive is replaced with ``'---'``. #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. 
""" d, p = os.path.splitdrive(os.path.abspath(path)) if d: d = d.replace(':', '---') p = p.replace(os.sep, '--') return d + p + '.cache' def ensure_slash(s): if not s.endswith('/'): return s + '/' return s def parse_credentials(netloc): username = password = None if '@' in netloc: prefix, netloc = netloc.split('@', 1) if ':' not in prefix: username = prefix else: username, password = prefix.split(':', 1) return username, password, netloc def get_process_umask(): result = os.umask(0o22) os.umask(result) return result def is_string_sequence(seq): result = True i = None for i, s in enumerate(seq): if not isinstance(s, string_types): result = False break assert i is not None return result PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' '([a-z0-9_.+-]+)', re.I) PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') def split_filename(filename, project_name=None): """ Extract name, version, python version from a filename (no extension) Return name, version, pyver or None """ result = None pyver = None m = PYTHON_VERSION.search(filename) if m: pyver = m.group(1) filename = filename[:m.start()] if project_name and len(filename) > len(project_name) + 1: m = re.match(re.escape(project_name) + r'\b', filename) if m: n = m.end() result = filename[:n], filename[n + 1:], pyver if result is None: m = PROJECT_NAME_AND_VERSION.match(filename) if m: result = m.group(1), m.group(3), pyver return result # Allow spaces in name because of legacy dists like "Twisted Core" NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' r'\(\s*(?P<ver>[^\s)]+)\)$') def parse_name_and_version(p): """ A utility method used to get name and version from a string. From e.g. a Provides-Dist value. :param p: A value in a form 'foo (1.0)' :return: The name and version as a tuple. 
""" m = NAME_VERSION_RE.match(p) if not m: raise DistlibException('Ill-formed name/version string: \'%s\'' % p) d = m.groupdict() return d['name'].strip().lower(), d['ver'] def get_extras(requested, available): result = set() requested = set(requested or []) available = set(available or []) if '*' in requested: requested.remove('*') result |= available for r in requested: if r == '-': result.add(r) elif r.startswith('-'): unwanted = r[1:] if unwanted not in available: logger.warning('undeclared extra: %s' % unwanted) if unwanted in result: result.remove(unwanted) else: if r not in available: logger.warning('undeclared extra: %s' % r) result.add(r) return result # # Extended metadata functionality # def _get_external_data(url): result = {} try: # urlopen might fail if it runs into redirections, # because of Python issue #13696. Fixed in locators # using a custom redirect handler. resp = urlopen(url) headers = resp.info() if headers.get('Content-Type') != 'application/json': logger.debug('Unexpected response for JSON request') else: reader = codecs.getreader('utf-8')(resp) #data = reader.read().decode('utf-8') #result = json.loads(data) result = json.load(reader) except Exception as e: logger.exception('Failed to get external data for %s: %s', url, e) return result _external_data_base_url = 'https://www.red-dove.com/pypi/projects/' def get_project_data(name): url = '%s/%s/project.json' % (name[0].upper(), name) url = urljoin(_external_data_base_url, url) result = _get_external_data(url) return result def get_package_data(name, version): url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) url = urljoin(_external_data_base_url, url) return _get_external_data(url) class Cache(object): """ A class implementing a cache for resources that need to live in the file system e.g. shared libraries. This class was moved from resources to here because it could be used by other modules, e.g. the wheel module. """ def __init__(self, base): """ Initialise an instance. 
:param base: The base directory where the cache should be located. """ # we use 'isdir' instead of 'exists', because we want to # fail if there's a file with that name if not os.path.isdir(base): os.makedirs(base) if (os.stat(base).st_mode & 0o77) != 0: logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base)) def prefix_to_dir(self, prefix): """ Converts a resource prefix to a directory name in the cache. """ return path_to_cache_dir(prefix) def clear(self): """ Clear the cache. """ not_removed = [] for fn in os.listdir(self.base): fn = os.path.join(self.base, fn) try: if os.path.islink(fn) or os.path.isfile(fn): os.remove(fn) elif os.path.isdir(fn): shutil.rmtree(fn) except Exception: not_removed.append(fn) return not_removed class EventMixin(object): """ A very simple publish/subscribe system. """ def __init__(self): self._subscribers = {} def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber) def remove(self, event, subscriber): """ Remove a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be removed. """ subs = self._subscribers if event not in subs: raise ValueError('No subscribers: %r' % event) subs[event].remove(subscriber) def get_subscribers(self, event): """ Return an iterator for the subscribers for an event. :param event: The event to return subscribers for. """ return iter(self._subscribers.get(event, ())) def publish(self, event, *args, **kwargs): """ Publish a event and return a list of values returned by its subscribers. 
:param event: The event to publish. :param args: The positional arguments to pass to the event's subscribers. :param kwargs: The keyword arguments to pass to the event's subscribers. """ result = [] for subscriber in self.get_subscribers(event): try: value = subscriber(event, *args, **kwargs) except Exception: logger.exception('Exception during event publication') value = None result.append(value) logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result # # Simple sequencing # class Sequencer(object): def __init__(self): self._preds = {} self._succs = {} self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) def remove_node(self, node, edges=False): if node in self._nodes: self._nodes.remove(node) if edges: for p in set(self._preds.get(node, ())): self.remove(p, node) for s in set(self._succs.get(node, ())): self.remove(node, s) # Remove empties for k, v in list(self._preds.items()): if not v: del self._preds[k] for k, v in list(self._succs.items()): if not v: del self._succs[k] def add(self, pred, succ): assert pred != succ self._preds.setdefault(succ, set()).add(pred) self._succs.setdefault(pred, set()).add(succ) def remove(self, pred, succ): assert pred != succ try: preds = self._preds[succ] succs = self._succs[pred] except KeyError: raise ValueError('%r not a successor of anything' % succ) try: preds.remove(pred) succs.remove(succ) except KeyError: raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): raise ValueError('Unknown: %r' % final) result = [] todo = [] seen = set() todo.append(final) while todo: step = todo.pop(0) if step in seen: # if a step was already seen, # move it to the end (so it will appear earlier # when reversed on return) ... 
but not for the # final step, as that would be confusing for # users if step != final: result.remove(step) result.append(step) else: seen.add(step) result.append(step) preds = self._preds.get(step, ()) todo.extend(preds) return reversed(result) @property def strong_connections(self): #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} index = {} result = [] graph = self._succs def strongconnect(node): # set the depth index for this node to the smallest unused index index[node] = index_counter[0] lowlinks[node] = index_counter[0] index_counter[0] += 1 stack.append(node) # Consider successors try: successors = graph[node] except Exception: successors = [] for successor in successors: if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) lowlinks[node] = min(lowlinks[node],lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) lowlinks[node] = min(lowlinks[node],index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: connected_component = [] while True: successor = stack.pop() connected_component.append(successor) if successor == node: break component = tuple(connected_component) # storing the result result.append(component) for node in graph: if node not in lowlinks: strongconnect(node) return result @property def dot(self): result = ['digraph G {'] for succ in self._preds: preds = self._preds[succ] for pred in preds: result.append(' %s -> %s;' % (pred, succ)) for node in self._nodes: result.append(' %s;' % node) result.append('}') return '\n'.join(result) # # Unarchiving functionality for zip, tar, tgz, tbz, whl # ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl') def unarchive(archive_filename, dest_dir, format=None, check=True): def check_path(path): if not isinstance(path, 
text_type): path = path.decode('utf-8') p = os.path.abspath(os.path.join(dest_dir, path)) if not p.startswith(dest_dir) or p[plen] != os.sep: raise ValueError('path outside destination: %r' % p) dest_dir = os.path.abspath(dest_dir) plen = len(dest_dir) archive = None if format is None: if archive_filename.endswith(('.zip', '.whl')): format = 'zip' elif archive_filename.endswith(('.tar.gz', '.tgz')): format = 'tgz' mode = 'r:gz' elif archive_filename.endswith(('.tar.bz2', '.tbz')): format = 'tbz' mode = 'r:bz2' elif archive_filename.endswith('.tar'): format = 'tar' mode = 'r' else: raise ValueError('Unknown format for %r' % archive_filename) try: if format == 'zip': archive = ZipFile(archive_filename, 'r') if check: names = archive.namelist() for name in names: check_path(name) else: archive = tarfile.open(archive_filename, mode) if check: names = archive.getnames() for name in names: check_path(name) if format != 'zip' and sys.version_info[0] < 3: # See Python issue 17153. If the dest path contains Unicode, # tarfile extraction fails on Python 2.x if a member path name # contains non-ASCII characters - it leads to an implicit # bytes -> unicode conversion using ASCII to decode. 
for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') archive.extractall(dest_dir) finally: if archive: archive.close() def zip_dir(directory): """zip a directory tree into a BytesIO object""" result = io.BytesIO() dlen = len(directory) with ZipFile(result, "w") as zf: for root, dirs, files in os.walk(directory): for name in files: full = os.path.join(root, name) rel = root[dlen:] dest = os.path.join(rel, name) zf.write(full, dest) return result # # Simple progress bar # UNITS = ('', 'K', 'M', 'G','T','P') class Progress(object): unknown = 'UNKNOWN' def __init__(self, minval=0, maxval=100): assert maxval is None or maxval >= minval self.min = self.cur = minval self.max = maxval self.started = None self.elapsed = 0 self.done = False def update(self, curval): assert self.min <= curval assert self.max is None or curval <= self.max self.cur = curval now = time.time() if self.started is None: self.started = now else: self.elapsed = now - self.started def increment(self, incr): assert incr >= 0 self.update(self.cur + incr) def start(self): self.update(self.min) return self def stop(self): if self.max is not None: self.update(self.max) self.done = True @property def maximum(self): return self.unknown if self.max is None else self.max @property def percentage(self): if self.done: result = '100 %' elif self.max is None: result = ' ?? %' else: v = 100.0 * (self.cur - self.min) / (self.max - self.min) result = '%3d %%' % v return result def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' 
#elif duration < 1: # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @property def ETA(self): if self.done: prefix = 'Done' t = self.elapsed #import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: t = -1 elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: #import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed return '%s: %s' % (prefix, self.format_duration(t)) @property def speed(self): if self.elapsed == 0: result = 0.0 else: result = (self.cur - self.min) / self.elapsed for unit in UNITS: if result < 1000: break result /= 1000.0 return '%d %sB/s' % (result, unit) # # Glob functionality # RICH_GLOB = re.compile(r'\{([^}]*)\}') _CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]') _CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$') def iglob(path_glob): """Extended globbing function that supports ** and {opt1,opt2,opt3}.""" if _CHECK_RECURSIVE_GLOB.search(path_glob): msg = """invalid glob %r: recursive glob "**" must be used alone""" raise ValueError(msg % path_glob) if _CHECK_MISMATCH_SET.search(path_glob): msg = """invalid glob %r: mismatching set marker '{' or '}'""" raise ValueError(msg % path_glob) return _iglob(path_glob) def _iglob(path_glob): rich_path_glob = RICH_GLOB.split(path_glob, 1) if len(rich_path_glob) > 1: assert len(rich_path_glob) == 3, rich_path_glob prefix, set, suffix = rich_path_glob for item in set.split(','): for path in _iglob(''.join((prefix, item, suffix))): yield path else: if '**' not in path_glob: for item in std_iglob(path_glob): yield item else: prefix, radical = path_glob.split('**', 1) if prefix == '': prefix = '.' 
if radical == '': radical = '*' else: # we support both radical = radical.lstrip('/') radical = radical.lstrip('\\') for path, dir, files in os.walk(prefix): path = os.path.normpath(path) for fn in _iglob(os.path.join(path, radical)): yield fn # # HTTPSConnection which verifies certificates/matches domains # class HTTPSConnection(httplib.HTTPSConnection): ca_certs = None # set this to the path to the certs file (.pem) check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): sock = socket.create_connection((self.host, self.port), self.timeout) if getattr(self, '_tunnel_host', False): self.sock = sock self._tunnel() if not hasattr(ssl, 'SSLContext'): # For 2.x if self.ca_certs: cert_reqs = ssl.CERT_REQUIRED else: cert_reqs = ssl.CERT_NONE self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=cert_reqs, ssl_version=ssl.PROTOCOL_SSLv23, ca_certs=self.ca_certs) else: context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.options |= ssl.OP_NO_SSLv2 if self.cert_file: context.load_cert_chain(self.cert_file, self.key_file) kwargs = {} if self.ca_certs: context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(cafile=self.ca_certs) if getattr(ssl, 'HAS_SNI', False): kwargs['server_hostname'] = self.host self.sock = context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) logger.debug('Host verified: %s', self.host) except CertificateError: self.sock.shutdown(socket.SHUT_RDWR) self.sock.close() raise class HTTPSHandler(BaseHTTPSHandler): def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs self.check_domain = check_domain def _conn_maker(self, *args, **kwargs): """ This is called to create a connection instance. Normally you'd pass a connection class to do_open, but it doesn't actually check for a class, and just expects a callable. 
As long as we behave just as a constructor would have, we should be OK. If it ever changes so that we *must* pass a class, we'll create an UnsafeHTTPSConnection class which just sets check_domain to False in the class definition, and choose which one to pass to do_open. """ result = HTTPSConnection(*args, **kwargs) if self.ca_certs: result.ca_certs = self.ca_certs result.check_domain = self.check_domain return result def https_open(self, req): try: return self.do_open(self._conn_maker, req) except URLError as e: if 'certificate verify failed' in str(e.reason): raise CertificateError('Unable to verify server certificate ' 'for %s' % req.host) else: raise # # To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The- # Middle proxy using HTTP listens on port 443, or an index mistakenly serves # HTML containing a http://xyz link when it should be https://xyz), # you can use the following handler class, which does not allow HTTP traffic. # # It works by inheriting from HTTPHandler - so build_opener won't add a # handler for HTTP itself. 
# class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): def http_open(self, req): raise URLError('Unexpected HTTP request on what should be a secure ' 'connection: %s' % req) # # XML-RPC with timeouts # _ver_info = sys.version_info[:2] if _ver_info == (2, 6): class HTTP(httplib.HTTP): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) class HTTPS(httplib.HTTPS): def __init__(self, host='', port=None, **kwargs): if port == 0: # 0 means use port 0, not the default port port = None self._setup(self._connection_class(host, port, **kwargs)) class Transport(xmlrpclib.Transport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.Transport.__init__(self, use_datetime) def make_connection(self, host): h, eh, x509 = self.get_host_info(host) if _ver_info == (2, 6): result = HTTP(h, timeout=self.timeout) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPConnection(h) result = self._connection[1] return result class SafeTransport(xmlrpclib.SafeTransport): def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.SafeTransport.__init__(self, use_datetime) def make_connection(self, host): h, eh, kwargs = self.get_host_info(host) if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout if _ver_info == (2, 6): result = HTTPS(host, None, **kwargs) else: if not self._connection or host != self._connection[0]: self._extra_headers = eh self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) result = self._connection[1] return result class ServerProxy(xmlrpclib.ServerProxy): def __init__(self, uri, **kwargs): self.timeout = timeout = kwargs.pop('timeout', None) # The above classes only come into play if a timeout # is specified if timeout is not None: scheme, _ = splittype(uri) use_datetime = kwargs.get('use_datetime', 0) if scheme 
== 'https': tcls = SafeTransport else: tcls = Transport kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime) self.transport = t xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) # # CSV functionality. This is provided because on 2.x, the csv module can't # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. # def _csv_open(fn, mode, **kwargs): if sys.version_info[0] < 3: mode += 'b' else: kwargs['newline'] = '' return open(fn, mode, **kwargs) class CSVBase(object): defaults = { 'delimiter': str(','), # The strs are used because we need native 'quotechar': str('"'), # str in the csv API (2.x won't take 'lineterminator': str('\n') # Unicode) } def __enter__(self): return self def __exit__(self, *exc_info): self.stream.close() class CSVReader(CSVBase): def __init__(self, **kwargs): if 'stream' in kwargs: stream = kwargs['stream'] if sys.version_info[0] >= 3: # needs to be a text stream stream = codecs.getreader('utf-8')(stream) self.stream = stream else: self.stream = _csv_open(kwargs['path'], 'r') self.reader = csv.reader(self.stream, **self.defaults) def __iter__(self): return self def next(self): result = next(self.reader) if sys.version_info[0] < 3: for i, item in enumerate(result): if not isinstance(item, text_type): result[i] = item.decode('utf-8') return result __next__ = next class CSVWriter(CSVBase): def __init__(self, fn, **kwargs): self.stream = _csv_open(fn, 'w') self.writer = csv.writer(self.stream, **self.defaults) def writerow(self, row): if sys.version_info[0] < 3: r = [] for item in row: if isinstance(item, text_type): item = item.encode('utf-8') r.append(item) row = r self.writer.writerow(row) # # Configurator functionality # class Configurator(BaseConfigurator): value_converters = dict(BaseConfigurator.value_converters) value_converters['inc'] = 'inc_convert' def __init__(self, config, base=None): super(Configurator, self).__init__(config) self.base = base or os.getcwd() def configure_custom(self, config): def 
convert(o): if isinstance(o, (list, tuple)): result = type(o)([convert(i) for i in o]) elif isinstance(o, dict): if '()' in o: result = self.configure_custom(o) else: result = {} for k in o: result[k] = convert(o[k]) else: result = self.convert(o) return result c = config.pop('()') if not callable(c): c = self.resolve(c) props = config.pop('.', None) # Check for valid identifiers args = config.pop('[]', ()) if args: args = tuple([convert(o) for o in args]) items = [(k, convert(config[k])) for k in config if valid_ident(k)] kwargs = dict(items) result = c(*args, **kwargs) if props: for n, v in props.items(): setattr(result, n, convert(v)) return result def __getitem__(self, key): result = self.config[key] if isinstance(result, dict) and '()' in result: self.config[key] = result = self.configure_custom(result) return result def inc_convert(self, value): """Default converter for the inc:// protocol.""" if not os.path.isabs(value): value = os.path.join(self.base, value) with codecs.open(value, 'r', encoding='utf-8') as f: result = json.load(f) return result # # Mixin for running subprocesses and capturing their output # class SubprocessMixin(object): def __init__(self, verbose=False, progress=None): self.verbose = verbose self.progress = progress def reader(self, stream, context): """ Read lines from a subprocess' output stream and either pass to a progress callable (if specified) or write progress information to sys.stderr. 
""" progress = self.progress verbose = self.verbose while True: s = stream.readline() if not s: break if progress is not None: progress(s, context) else: if not verbose: sys.stderr.write('.') else: sys.stderr.write(s.decode('utf-8')) sys.stderr.flush() stream.close() def run_command(self, cmd, **kwargs): p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) t1.start() t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) t2.start() p.wait() t1.join() t2.join() if self.progress is not None: self.progress('done.', 'main') elif self.verbose: sys.stderr.write('done.\n') return p
mit
glancyea/wastepermitcontent
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
2710
5094
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio user preferences file writer.""" import os import re import socket # for gethostname import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ def _FindCommandInPath(command): """If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.""" if '/' in command or '\\' in command: # If the command already has path elements (either relative or # absolute), then assume it is constructed properly. return command else: # Search through the path list and find an existing file that # we can access. paths = os.environ.get('PATH','').split(os.pathsep) for path in paths: item = os.path.join(path, command) if os.path.isfile(item) and os.access(item, os.X_OK): return item return command def _QuoteWin32CommandLineArgs(args): new_args = [] for arg in args: # Replace all double-quotes with double-double-quotes to escape # them for cmd shell, and then quote the whole thing if there # are any. if arg.find('"') != -1: arg = '""'.join(arg.split('"')) arg = '"%s"' % arg # Otherwise, if there are any spaces, quote the whole arg. elif re.search(r'[ \t\n]', arg): arg = '"%s"' % arg new_args.append(arg) return new_args class Writer(object): """Visual Studio XML user user file writer.""" def __init__(self, user_file_path, version, name): """Initializes the user file. Args: user_file_path: Path to the user file. version: Version info. name: Name of the user file. 
""" self.user_file_path = user_file_path self.version = version self.name = name self.configurations = {} def AddConfig(self, name): """Adds a configuration to the project. Args: name: Configuration name. """ self.configurations[name] = ['Configuration', {'Name': name}] def AddDebugSettings(self, config_name, command, environment = {}, working_directory=""): """Adds a DebugSettings node to the user file for a particular config. Args: command: command line to run. First element in the list is the executable. All elements of the command will be quoted if necessary. working_directory: other files which may trigger the rule. (optional) """ command = _QuoteWin32CommandLineArgs(command) abs_command = _FindCommandInPath(command[0]) if environment and isinstance(environment, dict): env_list = ['%s="%s"' % (key, val) for (key,val) in environment.iteritems()] environment = ' '.join(env_list) else: environment = '' n_cmd = ['DebugSettings', {'Command': abs_command, 'WorkingDirectory': working_directory, 'CommandArguments': " ".join(command[1:]), 'RemoteMachine': socket.gethostname(), 'Environment': environment, 'EnvironmentMerge': 'true', # Currently these are all "dummy" values that we're just setting # in the default manner that MSVS does it. We could use some of # these to add additional capabilities, I suppose, but they might # not have parity with other platforms then. 'Attach': 'false', 'DebuggerType': '3', # 'auto' debugger 'Remote': '1', 'RemoteCommand': '', 'HttpUrl': '', 'PDBPath': '', 'SQLDebugging': '', 'DebuggerFlavor': '0', 'MPIRunCommand': '', 'MPIRunArguments': '', 'MPIRunWorkingDirectory': '', 'ApplicationCommand': '', 'ApplicationArguments': '', 'ShimCommand': '', 'MPIAcceptMode': '', 'MPIAcceptFilter': '' }] # Find the config, and add it if it doesn't exist. if config_name not in self.configurations: self.AddConfig(config_name) # Add the DebugSettings onto the appropriate config. 
self.configurations[config_name].append(n_cmd) def WriteIfChanged(self): """Writes the user file.""" configs = ['Configurations'] for config, spec in sorted(self.configurations.iteritems()): configs.append(spec) content = ['VisualStudioUserFile', {'Version': self.version.ProjectVersion(), 'Name': self.name }, configs] easy_xml.WriteXmlIfChanged(content, self.user_file_path, encoding="Windows-1252")
mit
thomasalrin/Ghost
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/other.py
197
170951
# -*- coding: utf-8 -*- """ pygments.lexers.other ~~~~~~~~~~~~~~~~~~~~~ Lexers for other languages. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, bygroups, using, \ this, combined, ExtendedRegexLexer from pygments.token import Error, Punctuation, Literal, Token, \ Text, Comment, Operator, Keyword, Name, String, Number, Generic, \ Whitespace from pygments.util import get_bool_opt from pygments.lexers.web import HtmlLexer from pygments.lexers._openedgebuiltins import OPENEDGEKEYWORDS from pygments.lexers._robotframeworklexer import RobotFrameworkLexer # backwards compatibility from pygments.lexers.sql import SqlLexer, MySqlLexer, SqliteConsoleLexer from pygments.lexers.shell import BashLexer, BashSessionLexer, BatchLexer, \ TcshLexer __all__ = ['BrainfuckLexer', 'BefungeLexer', 'RedcodeLexer', 'MOOCodeLexer', 'SmalltalkLexer', 'LogtalkLexer', 'GnuplotLexer', 'PovrayLexer', 'AppleScriptLexer', 'ModelicaLexer', 'RebolLexer', 'ABAPLexer', 'NewspeakLexer', 'GherkinLexer', 'AsymptoteLexer', 'PostScriptLexer', 'AutohotkeyLexer', 'GoodDataCLLexer', 'MaqlLexer', 'ProtoBufLexer', 'HybrisLexer', 'AwkLexer', 'Cfengine3Lexer', 'SnobolLexer', 'ECLLexer', 'UrbiscriptLexer', 'OpenEdgeLexer', 'BroLexer', 'MscgenLexer', 'KconfigLexer', 'VGLLexer', 'SourcePawnLexer', 'RobotFrameworkLexer', 'PuppetLexer', 'NSISLexer', 'RPMSpecLexer', 'CbmBasicV2Lexer', 'AutoItLexer', 'RexxLexer'] class ECLLexer(RegexLexer): """ Lexer for the declarative big-data `ECL <http://hpccsystems.com/community/docs/ecl-language-reference/html>`_ language. 
*New in Pygments 1.5.* """ name = 'ECL' aliases = ['ecl'] filenames = ['*.ecl'] mimetypes = ['application/x-ecl'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'root': [ include('whitespace'), include('statements'), ], 'whitespace': [ (r'\s+', Text), (r'\/\/.*', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), ], 'statements': [ include('types'), include('keywords'), include('functions'), include('hash'), (r'"', String, 'string'), (r'\'', String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'0[0-7]+[LlUu]*', Number.Oct), (r'\d+[LlUu]*', Number.Integer), (r'\*/', Error), (r'[~!%^&*+=|?:<>/-]+', Operator), (r'[{}()\[\],.;]', Punctuation), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'hash': [ (r'^#.*$', Comment.Preproc), ], 'types': [ (r'(RECORD|END)\D', Keyword.Declaration), (r'((?:ASCII|BIG_ENDIAN|BOOLEAN|DATA|DECIMAL|EBCDIC|INTEGER|PATTERN|' r'QSTRING|REAL|RECORD|RULE|SET OF|STRING|TOKEN|UDECIMAL|UNICODE|' r'UNSIGNED|VARSTRING|VARUNICODE)\d*)(\s+)', bygroups(Keyword.Type, Text)), ], 'keywords': [ (r'(APPLY|ASSERT|BUILD|BUILDINDEX|EVALUATE|FAIL|KEYDIFF|KEYPATCH|' r'LOADXML|NOTHOR|NOTIFY|OUTPUT|PARALLEL|SEQUENTIAL|SOAPCALL|WAIT' r'CHECKPOINT|DEPRECATED|FAILCODE|FAILMESSAGE|FAILURE|GLOBAL|' r'INDEPENDENT|ONWARNING|PERSIST|PRIORITY|RECOVERY|STORED|SUCCESS|' r'WAIT|WHEN)\b', Keyword.Reserved), # These are classed differently, check later (r'(ALL|AND|ANY|AS|ATMOST|BEFORE|BEGINC\+\+|BEST|BETWEEN|CASE|CONST|' r'COUNTER|CSV|DESCEND|ENCRYPT|ENDC\+\+|ENDMACRO|EXCEPT|EXCLUSIVE|' r'EXPIRE|EXPORT|EXTEND|FALSE|FEW|FIRST|FLAT|FULL|FUNCTION|GROUP|' r'HEADER|HEADING|HOLE|IFBLOCK|IMPORT|IN|JOINED|KEEP|KEYED|LAST|' r'LEFT|LIMIT|LOAD|LOCAL|LOCALE|LOOKUP|MACRO|MANY|MAXCOUNT|' r'MAXLENGTH|MIN SKEW|MODULE|INTERFACE|NAMED|NOCASE|NOROOT|NOSCAN|' r'NOSORT|NOT|OF|ONLY|OPT|OR|OUTER|OVERWRITE|PACKED|PARTITION|' 
r'PENALTY|PHYSICALLENGTH|PIPE|QUOTE|RELATIONSHIP|REPEAT|RETURN|' r'RIGHT|SCAN|SELF|SEPARATOR|SERVICE|SHARED|SKEW|SKIP|SQL|STORE|' r'TERMINATOR|THOR|THRESHOLD|TOKEN|TRANSFORM|TRIM|TRUE|TYPE|' r'UNICODEORDER|UNSORTED|VALIDATE|VIRTUAL|WHOLE|WILD|WITHIN|XML|' r'XPATH|__COMPRESSED__)\b', Keyword.Reserved), ], 'functions': [ (r'(ABS|ACOS|ALLNODES|ASCII|ASIN|ASSTRING|ATAN|ATAN2|AVE|CASE|' r'CHOOSE|CHOOSEN|CHOOSESETS|CLUSTERSIZE|COMBINE|CORRELATION|COS|' r'COSH|COUNT|COVARIANCE|CRON|DATASET|DEDUP|DEFINE|DENORMALIZE|' r'DISTRIBUTE|DISTRIBUTED|DISTRIBUTION|EBCDIC|ENTH|ERROR|EVALUATE|' r'EVENT|EVENTEXTRA|EVENTNAME|EXISTS|EXP|FAILCODE|FAILMESSAGE|' r'FETCH|FROMUNICODE|GETISVALID|GLOBAL|GRAPH|GROUP|HASH|HASH32|' r'HASH64|HASHCRC|HASHMD5|HAVING|IF|INDEX|INTFORMAT|ISVALID|' r'ITERATE|JOIN|KEYUNICODE|LENGTH|LIBRARY|LIMIT|LN|LOCAL|LOG|LOOP|' r'MAP|MATCHED|MATCHLENGTH|MATCHPOSITION|MATCHTEXT|MATCHUNICODE|' r'MAX|MERGE|MERGEJOIN|MIN|NOLOCAL|NONEMPTY|NORMALIZE|PARSE|PIPE|' r'POWER|PRELOAD|PROCESS|PROJECT|PULL|RANDOM|RANGE|RANK|RANKED|' r'REALFORMAT|RECORDOF|REGEXFIND|REGEXREPLACE|REGROUP|REJECTED|' r'ROLLUP|ROUND|ROUNDUP|ROW|ROWDIFF|SAMPLE|SET|SIN|SINH|SIZEOF|' r'SOAPCALL|SORT|SORTED|SQRT|STEPPED|STORED|SUM|TABLE|TAN|TANH|' r'THISNODE|TOPN|TOUNICODE|TRANSFER|TRIM|TRUNCATE|TYPEOF|UNGROUP|' r'UNICODEORDER|VARIANCE|WHICH|WORKUNIT|XMLDECODE|XMLENCODE|' r'XMLTEXT|XMLUNICODE)\b', Name.Function), ], 'string': [ (r'"', String, '#pop'), (r'\'', String, '#pop'), (r'[^"\']+', String), ], } class BrainfuckLexer(RegexLexer): """ Lexer for the esoteric `BrainFuck <http://www.muppetlabs.com/~breadbox/bf/>`_ language. 
""" name = 'Brainfuck' aliases = ['brainfuck', 'bf'] filenames = ['*.bf', '*.b'] mimetypes = ['application/x-brainfuck'] tokens = { 'common': [ # use different colors for different instruction types (r'[.,]+', Name.Tag), (r'[+-]+', Name.Builtin), (r'[<>]+', Name.Variable), (r'[^.,+\-<>\[\]]+', Comment), ], 'root': [ (r'\[', Keyword, 'loop'), (r'\]', Error), include('common'), ], 'loop': [ (r'\[', Keyword, '#push'), (r'\]', Keyword, '#pop'), include('common'), ] } class BefungeLexer(RegexLexer): """ Lexer for the esoteric `Befunge <http://en.wikipedia.org/wiki/Befunge>`_ language. *New in Pygments 0.7.* """ name = 'Befunge' aliases = ['befunge'] filenames = ['*.befunge'] mimetypes = ['application/x-befunge'] tokens = { 'root': [ (r'[0-9a-f]', Number), (r'[\+\*/%!`-]', Operator), # Traditional math (r'[<>^v?\[\]rxjk]', Name.Variable), # Move, imperatives (r'[:\\$.,n]', Name.Builtin), # Stack ops, imperatives (r'[|_mw]', Keyword), (r'[{}]', Name.Tag), # Befunge-98 stack ops (r'".*?"', String.Double), # Strings don't appear to allow escapes (r'\'.', String.Single), # Single character (r'[#;]', Comment), # Trampoline... depends on direction hit (r'[pg&~=@iotsy]', Keyword), # Misc (r'[()A-Z]', Comment), # Fingerprints (r'\s+', Text), # Whitespace doesn't matter ], } class RedcodeLexer(RegexLexer): """ A simple Redcode lexer based on ICWS'94. Contributed by Adam Blinkinsop <blinks@acm.org>. 
*New in Pygments 0.8.* """ name = 'Redcode' aliases = ['redcode'] filenames = ['*.cw'] opcodes = ['DAT','MOV','ADD','SUB','MUL','DIV','MOD', 'JMP','JMZ','JMN','DJN','CMP','SLT','SPL', 'ORG','EQU','END'] modifiers = ['A','B','AB','BA','F','X','I'] tokens = { 'root': [ # Whitespace: (r'\s+', Text), (r';.*$', Comment.Single), # Lexemes: # Identifiers (r'\b(%s)\b' % '|'.join(opcodes), Name.Function), (r'\b(%s)\b' % '|'.join(modifiers), Name.Decorator), (r'[A-Za-z_][A-Za-z_0-9]+', Name), # Operators (r'[-+*/%]', Operator), (r'[#$@<>]', Operator), # mode (r'[.,]', Punctuation), # mode # Numbers (r'[-+]?\d+', Number.Integer), ], } class MOOCodeLexer(RegexLexer): """ For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting language). *New in Pygments 0.9.* """ name = 'MOOCode' filenames = ['*.moo'] aliases = ['moocode', 'moo'] mimetypes = ['text/x-moocode'] tokens = { 'root' : [ # Numbers (r'(0|[1-9][0-9_]*)', Number.Integer), # Strings (r'"(\\\\|\\"|[^"])*"', String), # exceptions (r'(E_PERM|E_DIV)', Name.Exception), # db-refs (r'((#[-0-9]+)|(\$[a-z_A-Z0-9]+))', Name.Entity), # Keywords (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while' r'|endwhile|break|continue|return|try' r'|except|endtry|finally|in)\b', Keyword), # builtins (r'(random|length)', Name.Builtin), # special variables (r'(player|caller|this|args)', Name.Variable.Instance), # skip whitespace (r'\s+', Text), (r'\n', Text), # other operators (r'([!;=,{}&\|:\.\[\]@\(\)\<\>\?]+)', Operator), # function call (r'([a-z_A-Z0-9]+)(\()', bygroups(Name.Function, Operator)), # variables (r'([a-zA-Z_0-9]+)', Text), ] } class SmalltalkLexer(RegexLexer): """ For `Smalltalk <http://www.smalltalk.org/>`_ syntax. Contributed by Stefan Matthias Aust. Rewritten by Nils Winter. 
*New in Pygments 0.10.* """ name = 'Smalltalk' filenames = ['*.st'] aliases = ['smalltalk', 'squeak', 'st'] mimetypes = ['text/x-smalltalk'] tokens = { 'root' : [ (r'(<)(\w+:)(.*?)(>)', bygroups(Text, Keyword, Text, Text)), include('squeak fileout'), include('whitespaces'), include('method definition'), (r'(\|)([\w\s]*)(\|)', bygroups(Operator, Name.Variable, Operator)), include('objects'), (r'\^|\:=|\_', Operator), # temporaries (r'[\]({}.;!]', Text), ], 'method definition' : [ # Not perfect can't allow whitespaces at the beginning and the # without breaking everything (r'([a-zA-Z]+\w*:)(\s*)(\w+)', bygroups(Name.Function, Text, Name.Variable)), (r'^(\b[a-zA-Z]+\w*\b)(\s*)$', bygroups(Name.Function, Text)), (r'^([-+*/\\~<>=|&!?,@%]+)(\s*)(\w+)(\s*)$', bygroups(Name.Function, Text, Name.Variable, Text)), ], 'blockvariables' : [ include('whitespaces'), (r'(:)(\s*)(\w+)', bygroups(Operator, Text, Name.Variable)), (r'\|', Operator, '#pop'), (r'', Text, '#pop'), # else pop ], 'literals' : [ (r"'(''|[^'])*'", String, 'afterobject'), (r'\$.', String.Char, 'afterobject'), (r'#\(', String.Symbol, 'parenth'), (r'\)', Text, 'afterobject'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number, 'afterobject'), ], '_parenth_helper' : [ include('whitespaces'), (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number), (r'[-+*/\\~<>=|&#!?,@%\w:]+', String.Symbol), # literals (r"'(''|[^'])*'", String), (r'\$.', String.Char), (r'#*\(', String.Symbol, 'inner_parenth'), ], 'parenth' : [ # This state is a bit tricky since # we can't just pop this state (r'\)', String.Symbol, ('root', 'afterobject')), include('_parenth_helper'), ], 'inner_parenth': [ (r'\)', String.Symbol, '#pop'), include('_parenth_helper'), ], 'whitespaces' : [ # skip whitespace and comments (r'\s+', Text), (r'"(""|[^"])*"', Comment), ], 'objects' : [ (r'\[', Text, 'blockvariables'), (r'\]', Text, 'afterobject'), (r'\b(self|super|true|false|nil|thisContext)\b', Name.Builtin.Pseudo, 'afterobject'), (r'\b[A-Z]\w*(?!:)\b', Name.Class, 
'afterobject'), (r'\b[a-z]\w*(?!:)\b', Name.Variable, 'afterobject'), (r'#("(""|[^"])*"|[-+*/\\~<>=|&!?,@%]+|[\w:]+)', String.Symbol, 'afterobject'), include('literals'), ], 'afterobject' : [ (r'! !$', Keyword , '#pop'), # squeak chunk delimiter include('whitespaces'), (r'\b(ifTrue:|ifFalse:|whileTrue:|whileFalse:|timesRepeat:)', Name.Builtin, '#pop'), (r'\b(new\b(?!:))', Name.Builtin), (r'\:=|\_', Operator, '#pop'), (r'\b[a-zA-Z]+\w*:', Name.Function, '#pop'), (r'\b[a-zA-Z]+\w*', Name.Function), (r'\w+:?|[-+*/\\~<>=|&!?,@%]+', Name.Function, '#pop'), (r'\.', Punctuation, '#pop'), (r';', Punctuation), (r'[\])}]', Text), (r'[\[({]', Text, '#pop'), ], 'squeak fileout' : [ # Squeak fileout format (optional) (r'^"(""|[^"])*"!', Keyword), (r"^'(''|[^'])*'!", Keyword), (r'^(!)(\w+)( commentStamp: )(.*?)( prior: .*?!\n)(.*?)(!)', bygroups(Keyword, Name.Class, Keyword, String, Keyword, Text, Keyword)), (r"^(!)(\w+(?: class)?)( methodsFor: )('(?:''|[^'])*')(.*?!)", bygroups(Keyword, Name.Class, Keyword, String, Keyword)), (r'^(\w+)( subclass: )(#\w+)' r'(\s+instanceVariableNames: )(.*?)' r'(\s+classVariableNames: )(.*?)' r'(\s+poolDictionaries: )(.*?)' r'(\s+category: )(.*?)(!)', bygroups(Name.Class, Keyword, String.Symbol, Keyword, String, Keyword, String, Keyword, String, Keyword, String, Keyword)), (r'^(\w+(?: class)?)(\s+instanceVariableNames: )(.*?)(!)', bygroups(Name.Class, Keyword, String, Keyword)), (r'(!\n)(\].*)(! !)$', bygroups(Keyword, Text, Keyword)), (r'! !$', Keyword), ], } class LogtalkLexer(RegexLexer): """ For `Logtalk <http://logtalk.org/>`_ source code. 
*New in Pygments 0.10.*
    """

    name = 'Logtalk'
    aliases = ['logtalk']
    filenames = ['*.lgt']
    mimetypes = ['text/x-logtalk']

    # State machine: 'root' scans ordinary clause text; a leading ':- '
    # switches to 'directive', which may in turn enter 'entityrelations'
    # for entity-opening directives. 'quoted_atom' handles '...' atoms.
    tokens = {
        'root': [
            # Directives
            (r'^\s*:-\s', Punctuation, 'directive'),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
            # Whitespace
            (r'\n', Text),
            (r'\s+', Text),
            # Numbers
            (r"0'.", Number),
            (r'0b[01]+', Number),
            (r'0o[0-7]+', Number),
            (r'0x[0-9a-fA-F]+', Number),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Event handlers
            (r'(after|before)(?=[(])', Keyword),
            # Execution-context methods
            (r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
            # Reflection
            (r'(current_predicate|predicate_property)(?=[(])', Keyword),
            # DCGs and term expansion
            (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])',
             Keyword),
            # Entity
            (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])',
             Keyword),
            (r'(object|protocol|category)_property(?=[(])', Keyword),
            # Entity relations
            (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
            (r'extends_(object|protocol|category)(?=[(])', Keyword),
            (r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
            (r'(instantiat|specializ)es_class(?=[(])', Keyword),
            # Events
            (r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
            # Flags
            (r'(current|set)_logtalk_flag(?=[(])', Keyword),
            # Compiling, loading, and library paths
            (r'logtalk_(compile|l(ibrary_path|oad_context|oad))(?=[(])',
             Keyword),
            # Database
            (r'(clause|retract(all)?)(?=[(])', Keyword),
            (r'a(bolish|ssert(a|z))(?=[(])', Keyword),
            # Control constructs
            (r'(ca(ll|tch)|throw)(?=[(])', Keyword),
            (r'(fail|true)\b', Keyword),
            # All solutions
            (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
            # Multi-threading meta-predicates
            (r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])',
             Keyword),
            # Term unification
            (r'unify_with_occurs_check(?=[(])', Keyword),
            # Term creation and decomposition
            (r'(functor|arg|copy_term|numbervars)(?=[(])', Keyword),
            # Evaluable functors
            (r'(rem|mod|abs|sign)(?=[(])', Keyword),
            (r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
            (r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
            # Other arithmetic functors
            (r'(cos|atan|exp|log|s(in|qrt))(?=[(])', Keyword),
            # Term testing
            (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|'
             r'ground)(?=[(])', Keyword),
            # Term comparison
            (r'compare(?=[(])', Keyword),
            # Stream selection and control
            (r'(curren|se)t_(in|out)put(?=[(])', Keyword),
            (r'(open|close)(?=[(])', Keyword),
            (r'flush_output(?=[(])', Keyword),
            (r'(at_end_of_stream|flush_output)\b', Keyword),
            (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])',
             Keyword),
            # Character and byte input/output
            (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
            (r'\bnl\b', Keyword),
            # Term input/output
            (r'read(_term)?(?=[(])', Keyword),
            (r'write(q|_(canonical|term))?(?=[(])', Keyword),
            (r'(current_)?op(?=[(])', Keyword),
            (r'(current_)?char_conversion(?=[(])', Keyword),
            # Atomic term processing
            (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
            (r'(char_code|sub_atom)(?=[(])', Keyword),
            (r'number_c(har|ode)s(?=[(])', Keyword),
            # Implementation defined hooks functions
            (r'(se|curren)t_prolog_flag(?=[(])', Keyword),
            (r'\bhalt\b', Keyword),
            (r'halt(?=[(])', Keyword),
            # Message sending operators
            (r'(::|:|\^\^)', Operator),
            # External call
            (r'[{}]', Keyword),
            # Logic and control
            (r'\b(ignore|once)(?=[(])', Keyword),
            (r'\brepeat\b', Keyword),
            # Sorting
            (r'(key)?sort(?=[(])', Keyword),
            # Bitwise functors
            (r'(>>|<<|/\\|\\\\|\\)', Operator),
            # Arithmetic evaluation
            (r'\bis\b', Keyword),
            # Arithmetic comparison
            (r'(=:=|=\\=|<|=<|>=|>)', Operator),
            # Term creation and decomposition
            (r'=\.\.', Operator),
            # Term unification
            (r'(=|\\=)', Operator),
            # Term comparison
            (r'(==|\\==|@=<|@<|@>=|@>)', Operator),
            # Evaluable functors
            (r'(//|[-+*/])', Operator),
            (r'\b(e|pi|mod|rem)\b', Operator),
            # Other arithmetic functors
            (r'\b\*\*\b', Operator),
            # DCG rules
            (r'-->', Operator),
            # Control constructs
            (r'([!;]|->)', Operator),
            # Logic and control
            (r'\\+', Operator),
            # Mode operators
            (r'[?@]', Operator),
            # Existential quantifier
            (r'\^', Operator),
            # Strings
            (r'"(\\\\|\\"|[^"])*"', String),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
        ],
        'quoted_atom': [
            (r"''", String),
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
            (r"[^\\'\n]+", String),
            (r'\\', String),
        ],
        'directive': [
            # Conditional compilation directives
            (r'(el)?if(?=[(])', Keyword, 'root'),
            (r'(e(lse|ndif))[.]', Keyword, 'root'),
            # Entity directives
            (r'(category|object|protocol)(?=[(])', Keyword,
             'entityrelations'),
            (r'(end_(category|object|protocol))[.]', Keyword, 'root'),
            # Predicate scope directives
            (r'(public|protected|private)(?=[(])', Keyword, 'root'),
            # Other directives
            (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
            (r'in(fo|itialization)(?=[(])', Keyword, 'root'),
            (r'(dynamic|synchronized|threaded)[.]', Keyword, 'root'),
            (r'(alias|d(ynamic|iscontiguous)|m(eta_predicate|ode|ultifile)|'
             r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword,
             'root'),
            (r'op(?=[(])', Keyword, 'root'),
            (r'(c(alls|oinductive)|reexport|use(s|_module))(?=[(])', Keyword,
             'root'),
            (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
            (r'[a-z][a-zA-Z0-9_]*[.]', Text, 'root'),
        ],
        'entityrelations': [
            (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)'
             r'(?=[(])', Keyword),
            # Numbers
            (r"0'.", Number),
            (r'0b[01]+', Number),
            (r'0o[0-7]+', Number),
            (r'0x[0-9a-fA-F]+', Number),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
            # Strings
            (r'"(\\\\|\\"|[^"])*"', String),
            # End of entity-opening directive
            (r'([)]\.)', Text, 'root'),
            # Scope operator
            (r'(::)', Operator),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
# Whitespace
            (r'\n', Text),
            (r'\s+', Text),
        ]
    }

    def analyse_text(text):
        """Crude content sniff: claim the input as Logtalk whenever an
        entity-opening directive (object/protocol/category) occurs."""
        if ':- object(' in text:
            return True
        if ':- protocol(' in text:
            return True
        if ':- category(' in text:
            return True
        return False


def _shortened(word):
    # Expand a gnuplot abbreviation spec into a regex of alternatives.
    # The '$' marks the shortest accepted prefix; everything after it may
    # be truncated one character at a time, longest alternative first,
    # e.g. 'bi$nd' -> r'bind\b|bin\b|bi\b'.
    dpos = word.find('$')
    return '|'.join([word[:dpos] + word[dpos+1:i] + r'\b'
                     for i in range(len(word), dpos, -1)])

def _shortened_many(*words):
    # Join several abbreviation specs (see _shortened) into one regex.
    return '|'.join(map(_shortened, words))


class GnuplotLexer(RegexLexer):
    """
    For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.

    *New in Pygments 0.11.*
    """

    name = 'Gnuplot'
    aliases = ['gnuplot']
    filenames = ['*.plot', '*.plt']
    mimetypes = ['text/x-gnuplot']

    tokens = {
        'root': [
            include('whitespace'),
            # Each command pushes a state that knows that command's
            # argument syntax; commands may be abbreviated (_shortened).
            (_shortened('bi$nd'), Keyword, 'bind'),
            (_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
            (_shortened('f$it'), Keyword, 'fit'),
            (r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
            (r'else\b', Keyword),
            (_shortened('pa$use'), Keyword, 'pause'),
            (_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
            (_shortened('sa$ve'), Keyword, 'save'),
            (_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
            (_shortened_many('sh$ow', 'uns$et'), Keyword,
             ('noargs', 'optionarg')),
            (_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
                             'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
                             'pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'sy$stem', 'up$date'),
             Keyword, 'genericargs'),
            (_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
                             'she$ll', 'test$'), Keyword, 'noargs'),
            # Variable and function definitions.
            ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(=)',
             bygroups(Name.Variable, Text, Operator), 'genericargs'),
            ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*\(.*?\)\s*)(=)',
             bygroups(Name.Function, Text, Operator), 'genericargs'),
            (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros
            (r';', Keyword),
        ],
        'comment': [
            (r'[^\\\n]', Comment),
            (r'\\\n', Comment),
            (r'\\', Comment),
            # don't add the newline to the Comment token
            ('', Comment, '#pop'),
        ],
        'whitespace': [
            ('#', Comment, 'comment'),
            (r'[ \t\v\f]+', Text),
        ],
        'noargs': [
include('whitespace'), # semicolon and newline end the argument list (r';', Punctuation, '#pop'), (r'\n', Text, '#pop'), ], 'dqstring': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash (r'\n', String, '#pop'), # newline ends the string too ], 'sqstring': [ (r"''", String), # escaped single quote (r"'", String, '#pop'), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # normal backslash (r'\n', String, '#pop'), # newline ends the string too ], 'genericargs': [ include('noargs'), (r'"', String, 'dqstring'), (r"'", String, 'sqstring'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float), (r'(\d+\.\d*|\.\d+)', Number.Float), (r'-?\d+', Number.Integer), ('[,.~!%^&*+=|?:<>/-]', Operator), ('[{}()\[\]]', Punctuation), (r'(eq|ne)\b', Operator.Word), (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\()', bygroups(Name.Function, Text, Punctuation)), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant), # macros (r'\\\n', Text), ], 'optionarg': [ include('whitespace'), (_shortened_many( "a$ll","an$gles","ar$row","au$toscale","b$ars","bor$der", "box$width","cl$abel","c$lip","cn$trparam","co$ntour","da$ta", "data$file","dg$rid3d","du$mmy","enc$oding","dec$imalsign", "fit$","font$path","fo$rmat","fu$nction","fu$nctions","g$rid", "hid$den3d","his$torysize","is$osamples","k$ey","keyt$itle", "la$bel","li$nestyle","ls$","loa$dpath","loc$ale","log$scale", "mac$ros","map$ping","map$ping3d","mar$gin","lmar$gin", "rmar$gin","tmar$gin","bmar$gin","mo$use","multi$plot", "mxt$ics","nomxt$ics","mx2t$ics","nomx2t$ics","myt$ics", "nomyt$ics","my2t$ics","nomy2t$ics","mzt$ics","nomzt$ics", "mcbt$ics","nomcbt$ics","of$fsets","or$igin","o$utput", "pa$rametric","pm$3d","pal$ette","colorb$ox","p$lot", "poi$ntsize","pol$ar","pr$int","obj$ect","sa$mples","si$ze", 
"st$yle","su$rface","table$","t$erminal","termo$ptions","ti$cs", "ticsc$ale","ticsl$evel","timef$mt","tim$estamp","tit$le", "v$ariables","ve$rsion","vi$ew","xyp$lane","xda$ta","x2da$ta", "yda$ta","y2da$ta","zda$ta","cbda$ta","xl$abel","x2l$abel", "yl$abel","y2l$abel","zl$abel","cbl$abel","xti$cs","noxti$cs", "x2ti$cs","nox2ti$cs","yti$cs","noyti$cs","y2ti$cs","noy2ti$cs", "zti$cs","nozti$cs","cbti$cs","nocbti$cs","xdti$cs","noxdti$cs", "x2dti$cs","nox2dti$cs","ydti$cs","noydti$cs","y2dti$cs", "noy2dti$cs","zdti$cs","nozdti$cs","cbdti$cs","nocbdti$cs", "xmti$cs","noxmti$cs","x2mti$cs","nox2mti$cs","ymti$cs", "noymti$cs","y2mti$cs","noy2mti$cs","zmti$cs","nozmti$cs", "cbmti$cs","nocbmti$cs","xr$ange","x2r$ange","yr$ange", "y2r$ange","zr$ange","cbr$ange","rr$ange","tr$ange","ur$ange", "vr$ange","xzeroa$xis","x2zeroa$xis","yzeroa$xis","y2zeroa$xis", "zzeroa$xis","zeroa$xis","z$ero"), Name.Builtin, '#pop'), ], 'bind': [ ('!', Keyword, '#pop'), (_shortened('all$windows'), Name.Builtin), include('genericargs'), ], 'quit': [ (r'gnuplot\b', Keyword), include('noargs'), ], 'fit': [ (r'via\b', Name.Builtin), include('plot'), ], 'if': [ (r'\)', Punctuation, '#pop'), include('genericargs'), ], 'pause': [ (r'(mouse|any|button1|button2|button3)\b', Name.Builtin), (_shortened('key$press'), Name.Builtin), include('genericargs'), ], 'plot': [ (_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex', 'mat$rix', 's$mooth', 'thru$', 't$itle', 'not$itle', 'u$sing', 'w$ith'), Name.Builtin), include('genericargs'), ], 'save': [ (_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'), Name.Builtin), include('genericargs'), ], } class PovrayLexer(RegexLexer): """ For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files. 
*New in Pygments 0.11.*
    """

    name = 'POVRay'
    aliases = ['pov']
    filenames = ['*.pov', '*.inc']
    mimetypes = ['text/x-povray']

    # Single flat state: comments, strings, preprocessor-style '#'
    # directives, then one large alternation of SDL keywords.
    tokens = {
        'root': [
            (r'/\*[\w\W]*?\*/', Comment.Multiline),
            (r'//.*\n', Comment.Single),
            (r'(?s)"(?:\\.|[^"\\])+"', String.Double),
            (r'#(debug|default|else|end|error|fclose|fopen|ifdef|ifndef|'
             r'include|range|read|render|statistics|switch|undef|version|'
             r'warning|while|write|define|macro|local|declare)\b',
             Comment.Preproc),
            (r'\b(aa_level|aa_threshold|abs|acos|acosh|adaptive|adc_bailout|'
             r'agate|agate_turb|all|alpha|ambient|ambient_light|angle|'
             r'aperture|arc_angle|area_light|asc|asin|asinh|assumed_gamma|'
             r'atan|atan2|atanh|atmosphere|atmospheric_attenuation|'
             r'attenuating|average|background|black_hole|blue|blur_samples|'
             r'bounded_by|box_mapping|bozo|break|brick|brick_size|'
             r'brightness|brilliance|bumps|bumpy1|bumpy2|bumpy3|bump_map|'
             r'bump_size|case|caustics|ceil|checker|chr|clipped_by|clock|'
             r'color|color_map|colour|colour_map|component|composite|concat|'
             r'confidence|conic_sweep|constant|control0|control1|cos|cosh|'
             r'count|crackle|crand|cube|cubic_spline|cylindrical_mapping|'
             r'debug|declare|default|degrees|dents|diffuse|direction|'
             r'distance|distance_maximum|div|dust|dust_type|eccentricity|'
             r'else|emitting|end|error|error_bound|exp|exponent|'
             r'fade_distance|fade_power|falloff|falloff_angle|false|'
             r'file_exists|filter|finish|fisheye|flatness|flip|floor|'
             r'focal_point|fog|fog_alt|fog_offset|fog_type|frequency|gif|'
             r'global_settings|glowing|gradient|granite|gray_threshold|'
             r'green|halo|hexagon|hf_gray_16|hierarchy|hollow|hypercomplex|'
             r'if|ifdef|iff|image_map|incidence|include|int|interpolate|'
             r'inverse|ior|irid|irid_wavelength|jitter|lambda|leopard|'
             r'linear|linear_spline|linear_sweep|location|log|looks_like|'
             r'look_at|low_error_factor|mandel|map_type|marble|material_map|'
             r'matrix|max|max_intersections|max_iteration|max_trace_level|'
             r'max_value|metallic|min|minimum_reuse|mod|mortar|'
r'nearest_count|no|normal|normal_map|no_shadow|number_of_waves|'
             r'octaves|off|offset|omega|omnimax|on|once|onion|open|'
             r'orthographic|panoramic|pattern1|pattern2|pattern3|'
             r'perspective|pgm|phase|phong|phong_size|pi|pigment|'
             r'pigment_map|planar_mapping|png|point_at|pot|pow|ppm|'
             r'precision|pwr|quadratic_spline|quaternion|quick_color|'
             r'quick_colour|quilted|radial|radians|radiosity|radius|rainbow|'
             r'ramp_wave|rand|range|reciprocal|recursion_limit|red|'
             r'reflection|refraction|render|repeat|rgb|rgbf|rgbft|rgbt|'
             r'right|ripples|rotate|roughness|samples|scale|scallop_wave|'
             r'scattering|seed|shadowless|sin|sine_wave|sinh|sky|sky_sphere|'
             r'slice|slope_map|smooth|specular|spherical_mapping|spiral|'
             r'spiral1|spiral2|spotlight|spotted|sqr|sqrt|statistics|str|'
             r'strcmp|strength|strlen|strlwr|strupr|sturm|substr|switch|sys|'
             r't|tan|tanh|test_camera_1|test_camera_2|test_camera_3|'
             r'test_camera_4|texture|texture_map|tga|thickness|threshold|'
             r'tightness|tile2|tiles|track|transform|translate|transmit|'
             r'triangle_wave|true|ttf|turbulence|turb_depth|type|'
             r'ultra_wide_angle|up|use_color|use_colour|use_index|u_steps|'
             r'val|variance|vaxis_rotate|vcross|vdot|version|vlength|'
             r'vnormalize|volume_object|volume_rendered|vol_with_light|'
             r'vrotate|v_steps|warning|warp|water_level|waves|while|width|'
             r'wood|wrinkles|yes)\b', Keyword),
            # Scene object types.
            (r'(bicubic_patch|blob|box|camera|cone|cubic|cylinder|difference|'
             r'disc|height_field|intersection|julia_fractal|lathe|'
             r'light_source|merge|mesh|object|plane|poly|polygon|prism|'
             r'quadric|quartic|smooth_triangle|sor|sphere|superellipsoid|'
             r'text|torus|triangle|union)\b', Name.Builtin),
            # TODO: <=, etc
            (r'[\[\](){}<>;,]', Punctuation),
            (r'[-+*/=]', Operator),
            (r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name),
            (r'[0-9]+\.[0-9]*', Number.Float),
            (r'\.[0-9]+', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\s+', Text),
        ]
    }


class AppleScriptLexer(RegexLexer):
    """
    For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
    Conceptual/AppleScriptLangGuide>`_,
    including `AppleScript Studio
    <http://developer.apple.com/documentation/AppleScript/
    Reference/StudioReference>`_.

    Contributed by Andreas Amann <aamann@mac.com>.
    """

    name = 'AppleScript'
    aliases = ['applescript']
    filenames = ['*.applescript']

    flags = re.MULTILINE | re.DOTALL

    # The word lists below are interpolated into the 'root' regexes via
    # '|'.join(...); individual entries are therefore regex fragments,
    # not plain strings (note the optional groups and alternations).
    Identifiers = r'[a-zA-Z]\w*'
    Literals = ['AppleScript', 'current application', 'false', 'linefeed',
                'missing value', 'pi','quote', 'result', 'return', 'space',
                'tab', 'text item delimiters', 'true', 'version']
    Classes = ['alias ', 'application ', 'boolean ', 'class ', 'constant ',
               'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
               'real ', 'record ', 'reference ', 'RGB color ', 'script ',
               'text ', 'unit types', '(?:Unicode )?text', 'string']
    BuiltIn = ['attachment', 'attribute run', 'character', 'day', 'month',
               'paragraph', 'word', 'year']
    HandlerParams = ['about', 'above', 'against', 'apart from', 'around',
                     'aside from', 'at', 'below', 'beneath', 'beside',
                     'between', 'for', 'given', 'instead of', 'on', 'onto',
                     'out of', 'over', 'since']
    Commands = ['ASCII (character|number)', 'activate', 'beep', 'choose URL',
                'choose application', 'choose color', 'choose file( name)?',
                'choose folder', 'choose from list',
                'choose remote application', 'clipboard info',
                'close( access)?', 'copy', 'count', 'current date', 'delay',
                'delete', 'display (alert|dialog)', 'do shell script',
                'duplicate', 'exists', 'get eof', 'get volume settings',
                'info for', 'launch', 'list (disks|folder)', 'load script',
                'log', 'make', 'mount volume', 'new', 'offset',
                'open( (for access|location))?', 'path to', 'print', 'quit',
                'random number', 'read', 'round', 'run( script)?', 'say',
                'scripting components', 'set (eof|the clipboard to|volume)',
                'store script', 'summarize', 'system attribute',
                'system info', 'the clipboard', 'time to GMT', 'write',
                'quoted form']
    References = ['(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
                  'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
                  'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
                  'before', 'behind', 'every', 'front', 'index', 'last',
                  'middle', 'some', 'that', 'through', 'thru', 'where',
                  'whose']
    Operators = ["and", "or", "is equal", "equals", "(is )?equal to",
                 "is not", "isn't", "isn't equal( to)?", "is not equal( to)?",
                 "doesn't equal", "does not equal", "(is )?greater than",
                 "comes after", "is not less than or equal( to)?",
                 "isn't less than or equal( to)?", "(is )?less than",
                 "comes before", "is not greater than or equal( to)?",
                 "isn't greater than or equal( to)?",
                 "(is )?greater than or equal( to)?", "is not less than",
                 "isn't less than", "does not come before",
                 "doesn't come before", "(is )?less than or equal( to)?",
                 "is not greater than", "isn't greater than",
                 "does not come after", "doesn't come after", "starts? with",
                 "begins? with", "ends? with", "contains?",
                 "does not contain", "doesn't contain", "is in",
                 "is contained by", "is not in", "is not contained by",
                 "isn't contained by", "div", "mod", "not",
                 "(a )?(ref( to)?|reference to)", "is", "does"]
    # NOTE(review): 'whith' below looks like a typo for 'with' -- kept
    # as-is to preserve behavior; confirm against upstream before fixing.
    Control = ['considering', 'else', 'error', 'exit', 'from', 'if',
               'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
               'try', 'until', 'using terms from', 'while', 'whith',
               'with timeout( of)?', 'with transaction', 'by', 'continue',
               'end', 'its?', 'me', 'my', 'return', 'of' , 'as']
    Declarations = ['global', 'local', 'prop(erty)?', 'set', 'get']
    Reserved = ['but', 'put', 'returning', 'the']
    StudioClasses = ['action cell', 'alert reply', 'application', 'box',
                     'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
                     'clip view', 'color well', 'color-panel',
                     'combo box( item)?', 'control',
                     'data( (cell|column|item|row|source))?', 'default entry',
                     'dialog reply', 'document', 'drag info', 'drawer',
                     'event', 'font(-panel)?', 'formatter',
                     'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
                     'movie( view)?', 'open-panel', 'outline view', 'panel',
                     'pasteboard', 'plugin', 'popup button',
                     'progress indicator', 'responder', 'save-panel',
                     'scroll view', 'secure text field( cell)?', 'slider',
                     'sound', 'split view', 'stepper', 'tab view( item)?',
                     'table( (column|header cell|header view|view))',
                     'text( (field( cell)?|view))?', 'toolbar( item)?',
                     'user-defaults', 'view', 'window']
    StudioEvents = ['accept outline drop', 'accept table drop', 'action',
                    'activated', 'alert ended', 'awake from nib', 'became key',
                    'became main', 'begin editing', 'bounds changed',
                    'cell value', 'cell value changed', 'change cell value',
                    'change item value', 'changed', 'child of item',
                    'choose menu item', 'clicked', 'clicked toolbar item',
                    'closed', 'column clicked', 'column moved',
                    'column resized', 'conclude drop', 'data representation',
                    'deminiaturized', 'dialog ended', 'document nib name',
                    'double clicked', 'drag( (entered|exited|updated))?',
                    'drop', 'end editing', 'exposed', 'idle',
                    'item expandable', 'item value', 'item value changed',
                    'items changed', 'keyboard down', 'keyboard up',
                    'launched', 'load data representation', 'miniaturized',
                    'mouse down', 'mouse dragged', 'mouse entered',
                    'mouse exited', 'mouse moved', 'mouse up', 'moved',
                    'number of browser rows', 'number of items',
                    'number of rows', 'open untitled', 'opened', 'panel ended',
                    'parameters updated', 'plugin loaded', 'prepare drop',
                    'prepare outline drag', 'prepare outline drop',
                    'prepare table drag', 'prepare table drop',
                    'read from file', 'resigned active', 'resigned key',
                    'resigned main', 'resized( sub views)?',
                    'right mouse down', 'right mouse dragged',
                    'right mouse up', 'rows changed', 'scroll wheel',
                    'selected tab view item', 'selection changed',
                    'selection changing', 'should begin editing',
                    'should close', 'should collapse item',
                    'should end editing', 'should expand item',
                    'should open( untitled)?',
                    'should quit( after last window closed)?',
                    'should select column', 'should select item',
                    'should select row', 'should select tab view item',
                    'should selection change', 'should zoom', 'shown',
                    'update menu item', 'update parameters',
                    'update toolbar item', 'was hidden', 'was miniaturized',
                    'will become active', 'will close', 'will dismiss',
                    'will display browser cell', 'will display cell',
                    'will display item cell', 'will display outline cell',
                    'will finish launching', 'will hide', 'will miniaturize',
                    'will move', 'will open', 'will pop up', 'will quit',
                    'will resign active', 'will resize( sub views)?',
                    'will select tab view item', 'will show', 'will zoom',
                    'write to file', 'zoomed']
    StudioCommands = ['animate', 'append', 'call method', 'center',
                      'close drawer', 'close panel', 'display',
                      'display alert', 'display dialog', 'display panel', 'go',
                      'hide', 'highlight', 'increment', 'item for',
                      'load image', 'load movie', 'load nib', 'load panel',
                      'load sound', 'localized string', 'lock focus', 'log',
                      'open drawer', 'path for', 'pause', 'perform action',
                      'play', 'register', 'resume', 'scroll', 'select( all)?',
                      'show', 'size to fit', 'start', 'step back',
                      'step forward', 'stop', 'synchronize', 'unlock focus',
                      'update']
    StudioProperties = ['accepts arrow key', 'action method', 'active',
                        'alignment', 'allowed identifiers',
                        'allows branch selection', 'allows column reordering',
                        'allows column resizing', 'allows column selection',
                        'allows customization',
                        'allows editing text attributes',
                        'allows empty selection', 'allows mixed state',
                        'allows multiple selection', 'allows reordering',
                        'allows undo', 'alpha( value)?', 'alternate image',
                        'alternate increment value', 'alternate title',
                        'animation delay', 'associated file name',
                        'associated object', 'auto completes', 'auto display',
                        'auto enables items', 'auto repeat',
                        'auto resizes( outline column)?',
                        'auto save expanded items', 'auto save name',
                        'auto save table columns', 'auto saves configuration',
                        'auto scroll', 'auto sizes all columns to fit',
                        'auto sizes cells', 'background color', 'bezel state',
                        'bezel style', 'bezeled', 'border rect', 'border type',
                        'bordered', 'bounds( rotation)?', 'box type',
                        'button returned', 'button type',
                        'can choose directories', 'can choose files',
                        'can draw', 'can hide',
                        'cell( (background color|size|type))?', 'characters',
                        'class', 'click count', 'clicked( data)? column',
                        'clicked data item', 'clicked( data)? row',
                        'closeable', 'collating', 'color( (mode|panel))',
                        'command key down', 'configuration',
                        'content(s| (size|view( margins)?))?', 'context',
                        'continuous', 'control key down', 'control size',
                        'control tint', 'control view',
                        'controller visible', 'coordinate system',
                        'copies( on scroll)?', 'corner view', 'current cell',
                        'current column', 'current( field)? editor',
                        'current( menu)? item', 'current row',
                        'current tab view item', 'data source',
                        'default identifiers', 'delta (x|y|z)',
                        'destination window', 'directory', 'display mode',
                        'displayed cell', 'document( (edited|rect|view))?',
                        'double value', 'dragged column', 'dragged distance',
                        'dragged items', 'draws( cell)? background',
                        'draws grid', 'dynamically scrolls', 'echos bullets',
                        'edge', 'editable', 'edited( data)? column',
                        'edited data item', 'edited( data)? row', 'enabled',
                        'enclosing scroll view', 'ending page',
                        'error handling', 'event number', 'event type',
                        'excluded from windows menu', 'executable path',
                        'expanded', 'fax number', 'field editor', 'file kind',
                        'file name', 'file type', 'first responder',
                        'first visible column', 'flipped', 'floating',
                        'font( panel)?', 'formatter', 'frameworks path',
                        'frontmost', 'gave up', 'grid color', 'has data items',
                        'has horizontal ruler', 'has horizontal scroller',
                        'has parent data item', 'has resize indicator',
                        'has shadow', 'has sub menu', 'has vertical ruler',
                        'has vertical scroller', 'header cell', 'header view',
                        'hidden', 'hides when deactivated', 'highlights by',
                        'horizontal line scroll', 'horizontal page scroll',
                        'horizontal ruler view', 'horizontally resizable',
                        'icon image', 'id', 'identifier',
                        'ignores multiple clicks',
                        'image( (alignment|dims when disabled|frame style|'
                        'scaling))?', 'imports graphics', 'increment value',
                        'indentation per level', 'indeterminate', 'index',
                        'integer value', 'intercell spacing', 'item height',
                        'key( (code|equivalent( modifier)?|window))?',
                        'knob thickness', 'label', 'last( visible)? column',
                        'leading offset', 'leaf', 'level', 'line scroll',
                        'loaded', 'localized sort', 'location', 'loop mode',
                        'main( (bunde|menu|window))?', 'marker follows cell',
                        'matrix mode', 'maximum( content)? size',
                        'maximum visible columns',
                        'menu( form representation)?', 'miniaturizable',
                        'miniaturized', 'minimized image', 'minimized title',
                        'minimum column width', 'minimum( content)? size',
                        'modal', 'modified', 'mouse down state',
                        'movie( (controller|file|rect))?', 'muted', 'name',
                        'needs display', 'next state', 'next text',
                        'number of tick marks', 'only tick mark values',
                        'opaque', 'open panel', 'option key down',
                        'outline table column', 'page scroll', 'pages across',
                        'pages down', 'palette label', 'pane splitter',
                        'parent data item', 'parent window', 'pasteboard',
                        'path( (names|separator))?', 'playing',
                        'plays every frame', 'plays selection only',
                        'position', 'preferred edge', 'preferred type',
                        'pressure', 'previous text', 'prompt', 'properties',
                        'prototype cell', 'pulls down', 'rate',
                        'released when closed', 'repeated',
                        'requested print time', 'required file type',
                        'resizable', 'resized column', 'resource path',
                        'returns records', 'reuses columns', 'rich text',
                        'roll over', 'row height', 'rulers visible',
                        'save panel', 'scripts path', 'scrollable',
                        'selectable( identifiers)?', 'selected cell',
                        'selected( data)? columns?', 'selected data items?',
                        'selected( data)? rows?', 'selected item identifier',
                        'selection by rect', 'send action on arrow key',
                        'sends action when done editing', 'separates columns',
                        'separator item', 'sequence number', 'services menu',
                        'shared frameworks path', 'shared support path',
                        'sheet', 'shift key down', 'shows alpha',
                        'shows state by', 'size( mode)?',
                        'smart insert delete enabled', 'sort case sensitivity',
                        'sort column', 'sort order', 'sort type',
                        'sorted( data rows)?', 'sound', 'source( mask)?',
                        'spell checking enabled', 'starting page', 'state',
                        'string value', 'sub menu', 'super menu', 'super view',
                        'tab key traverses cells', 'tab state', 'tab type',
                        'tab view', 'table view', 'tag', 'target( printer)?',
                        'text color', 'text container insert',
                        'text container origin', 'text returned',
                        'tick mark position', 'time stamp',
                        'title(d| (cell|font|height|position|rect))?',
                        'tool tip', 'toolbar', 'trailing offset',
                        'transparent', 'treat packages as directories',
                        'truncated labels', 'types', 'unmodified characters',
                        'update views', 'use sort indicator', 'user defaults',
                        'uses data source', 'uses ruler',
                        'uses threaded animation',
                        'uses title from previous column', 'value wraps',
                        'version',
                        'vertical( (line scroll|page scroll|ruler view))?',
                        'vertically resizable', 'view',
                        'visible( document rect)?', 'volume', 'width',
                        'window', 'windows menu', 'wraps', 'zoomable',
                        'zoomed']

    tokens = {
        'root': [
            (r'\s+', Text),
            # u'' literals: this module targets Python 2.
            (ur'¬\n', String.Escape),
            (r"'s\s+", Text), # This is a possessive, consider moving
            (r'(--|#).*?$', Comment),
            (r'\(\*', Comment.Multiline, 'comment'),
            (r'[\(\){}!,.:]', Punctuation),
            (ur'(«)([^»]+)(»)', bygroups(Text, Name.Builtin, Text)),
            (r'\b((?:considering|ignoring)\s*)'
             r'(application responses|case|diacriticals|hyphens|'
             r'numeric strings|punctuation|white space)',
             bygroups(Keyword, Name.Builtin)),
            (ur'(-|\*|\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\^)', Operator),
            (r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
            # Reversed so longer event names win over their prefixes.
            (r'^(\s*(?:on|end)\s+)'
             r'(%s)' % '|'.join(StudioEvents[::-1]),
             bygroups(Keyword,
Name.Function)),
            (r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
            (r'\b(as )(%s)\b' % '|'.join(Classes),
             bygroups(Keyword, Name.Class)),
            (r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
            (r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(Control), Keyword),
            (r'\b(%s)\b' % '|'.join(Declarations), Keyword),
            (r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
            (r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
            (r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
            (r'\b(%s)\b' % '|'.join(References), Name.Builtin),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r'\b(%s)\b' % Identifiers, Name.Variable),
            (r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
            (r'[-+]?\d+', Number.Integer),
        ],
        'comment': [
            # (* ... *) comments nest, hence the #push/#pop pair.
            ('\(\*', Comment.Multiline, '#push'),
            ('\*\)', Comment.Multiline, '#pop'),
            ('[^*(]+', Comment.Multiline),
            ('[*(]', Comment.Multiline),
        ],
    }


class ModelicaLexer(RegexLexer):
    """
    For `Modelica <http://www.modelica.org/>`_ source code.
*New in Pygments 1.1.*
    """

    name = 'Modelica'
    aliases = ['modelica']
    filenames = ['*.mo']
    mimetypes = ['text/x-modelica']

    flags = re.IGNORECASE | re.DOTALL

    tokens = {
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'//(\n|(.|\n)*?[^\\]\n)', Comment),
            (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
        ],
        'statements': [
            (r'"', String, 'string'),
            (r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'\d+[Ll]?', Number.Integer),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\]{},.;]', Punctuation),
            (r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
            # Dotted (possibly quoted) component references.
            (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
             r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
            (r"('[\w\+\-\*\/\^]+'|\w+)", Name),
        ],
        'root': [
            include('whitespace'),
            include('keywords'),
            include('functions'),
            include('operators'),
            include('classes'),
            # Embedded HTML (documentation annotations) is delegated to
            # HtmlLexer via the 'html-content' state.
            (r'("<html>|<html>)', Name.Tag, 'html-content'),
            include('statements'),
        ],
        'keywords': [
            (r'(algorithm|annotation|break|connect|constant|constrainedby|'
             r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
             r'end|equation|exit|expandable|extends|'
             r'external|false|final|flow|for|if|import|impure|in|initial\sequation|'
             r'inner|input|loop|nondiscrete|outer|output|parameter|partial|'
             r'protected|public|pure|redeclare|replaceable|stream|time|then|true|'
             r'when|while|within)\b', Keyword),
        ],
        'functions': [
            (r'(abs|acos|acosh|asin|asinh|atan|atan2|atan3|ceil|cos|cosh|'
             r'cross|div|exp|floor|getInstanceName|log|log10|mod|rem|'
             r'semiLinear|sign|sin|sinh|size|spatialDistribution|sqrt|tan|'
             r'tanh|zeros)\b', Name.Function),
        ],
        'operators': [
            (r'(actualStream|and|assert|cardinality|change|Clock|delay|der|edge|'
             r'hold|homotopy|initial|inStream|noEvent|not|or|pre|previous|reinit|'
             r'return|sample|smooth|spatialDistribution|subSample|terminal|'
             r'terminate)\b', Name.Builtin),
        ],
        'classes': [
            (r'(block|class|connector|function|model|package|'
             r'record|type)(\s+)([A-Za-z_]+)',
bygroups(Keyword, Text, Name.Class)) ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'html-content': [ (r'<\s*/\s*html\s*>', Name.Tag, '#pop'), (r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)), ] } class RebolLexer(RegexLexer): """ A `REBOL <http://www.rebol.com/>`_ lexer. *New in Pygments 1.1.* """ name = 'REBOL' aliases = ['rebol'] filenames = ['*.r', '*.r3'] mimetypes = ['text/x-rebol'] flags = re.IGNORECASE | re.MULTILINE re.IGNORECASE escape_re = r'(?:\^\([0-9a-fA-F]{1,4}\)*)' def word_callback(lexer, match): word = match.group() if re.match(".*:$", word): yield match.start(), Generic.Subheading, word elif re.match( r'(native|alias|all|any|as-string|as-binary|bind|bound\?|case|' r'catch|checksum|comment|debase|dehex|exclude|difference|disarm|' r'either|else|enbase|foreach|remove-each|form|free|get|get-env|if|' r'in|intersect|loop|minimum-of|maximum-of|mold|new-line|' r'new-line\?|not|now|prin|print|reduce|compose|construct|repeat|' r'reverse|save|script\?|set|shift|switch|throw|to-hex|trace|try|' r'type\?|union|unique|unless|unprotect|unset|until|use|value\?|' r'while|compress|decompress|secure|open|close|read|read-io|' r'write-io|write|update|query|wait|input\?|exp|log-10|log-2|' r'log-e|square-root|cosine|sine|tangent|arccosine|arcsine|' r'arctangent|protect|lowercase|uppercase|entab|detab|connected\?|' r'browse|launch|stats|get-modes|set-modes|to-local-file|' r'to-rebol-file|encloak|decloak|create-link|do-browser|bind\?|' r'hide|draw|show|size-text|textinfo|offset-to-caret|' r'caret-to-offset|local-request-file|rgb-to-hsv|hsv-to-rgb|' r'crypt-strength\?|dh-make-key|dh-generate-key|dh-compute-key|' r'dsa-make-key|dsa-generate-key|dsa-make-signature|' r'dsa-verify-signature|rsa-make-key|rsa-generate-key|' r'rsa-encrypt)$', word): yield match.start(), Name.Builtin, word elif 
re.match( r'(add|subtract|multiply|divide|remainder|power|and~|or~|xor~|' r'minimum|maximum|negate|complement|absolute|random|head|tail|' r'next|back|skip|at|pick|first|second|third|fourth|fifth|sixth|' r'seventh|eighth|ninth|tenth|last|path|find|select|make|to|copy\*|' r'insert|remove|change|poke|clear|trim|sort|min|max|abs|cp|' r'copy)$', word): yield match.start(), Name.Function, word elif re.match( r'(error|source|input|license|help|install|echo|Usage|with|func|' r'throw-on-error|function|does|has|context|probe|\?\?|as-pair|' r'mod|modulo|round|repend|about|set-net|append|join|rejoin|reform|' r'remold|charset|array|replace|move|extract|forskip|forall|alter|' r'first+|also|take|for|forever|dispatch|attempt|what-dir|' r'change-dir|clean-path|list-dir|dirize|rename|split-path|delete|' r'make-dir|delete-dir|in-dir|confirm|dump-obj|upgrade|what|' r'build-tag|process-source|build-markup|decode-cgi|read-cgi|' r'write-user|save-user|set-user-name|protect-system|parse-xml|' r'cvs-date|cvs-version|do-boot|get-net-info|desktop|layout|' r'scroll-para|get-face|alert|set-face|uninstall|unfocus|' r'request-dir|center-face|do-events|net-error|decode-url|' r'parse-header|parse-header-date|parse-email-addrs|import-email|' r'send|build-attach-body|resend|show-popup|hide-popup|open-events|' r'find-key-face|do-face|viewtop|confine|find-window|' r'insert-event-func|remove-event-func|inform|dump-pane|dump-face|' r'flag-face|deflag-face|clear-fields|read-net|vbug|path-thru|' r'read-thru|load-thru|do-thru|launch-thru|load-image|' r'request-download|do-face-alt|set-font|set-para|get-style|' r'set-style|make-face|stylize|choose|hilight-text|hilight-all|' r'unlight-text|focus|scroll-drag|clear-face|reset-face|scroll-face|' r'resize-face|load-stock|load-stock-block|notify|request|flash|' r'request-color|request-pass|request-text|request-list|' r'request-date|request-file|dbug|editor|link-relative-path|' r'emailer|parse-error)$', word): yield match.start(), Keyword.Namespace, word elif 
re.match( r'(halt|quit|do|load|q|recycle|call|run|ask|parse|view|unview|' r'return|exit|break)$', word): yield match.start(), Name.Exception, word elif re.match('REBOL$', word): yield match.start(), Generic.Heading, word elif re.match("to-.*", word): yield match.start(), Keyword, word elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$', word): yield match.start(), Operator, word elif re.match(".*\?$", word): yield match.start(), Keyword, word elif re.match(".*\!$", word): yield match.start(), Keyword.Type, word elif re.match("'.*", word): yield match.start(), Name.Variable.Instance, word # lit-word elif re.match("#.*", word): yield match.start(), Name.Label, word # issue elif re.match("%.*", word): yield match.start(), Name.Decorator, word # file else: yield match.start(), Name.Variable, word tokens = { 'root': [ (r'REBOL', Generic.Strong, 'script'), (r'R', Comment), (r'[^R]+', Comment), ], 'script': [ (r'\s+', Text), (r'#"', String.Char, 'char'), (r'#{[0-9a-fA-F]*}', Number.Hex), (r'2#{', Number.Hex, 'bin2'), (r'64#{[0-9a-zA-Z+/=\s]*}', Number.Hex), (r'"', String, 'string'), (r'{', String, 'string2'), (r';#+.*\n', Comment.Special), (r';\*+.*\n', Comment.Preproc), (r';.*\n', Comment), (r'%"', Name.Decorator, 'stringFile'), (r'%[^(\^{^")\s\[\]]+', Name.Decorator), (r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money (r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?' 
r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date (r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple (r'\d+[xX]\d+', Keyword.Constant), # pair (r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float), (r'[+-]?\d+(\'\d+)?[\.,]\d*', Number.Float), (r'[+-]?\d+(\'\d+)?', Number), (r'[\[\]\(\)]', Generic.Strong), (r'[a-zA-Z]+[^(\^{"\s:)]*://[^(\^{"\s)]*', Name.Decorator), # url (r'mailto:[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # url (r'[^(\^{"@\s)]+@[^(\^{"@\s)]+', Name.Decorator), # email (r'comment\s', Comment, 'comment'), (r'/[^(\^{^")\s/[\]]*', Name.Attribute), (r'([^(\^{^")\s/[\]]+)(?=[:({"\s/\[\]])', word_callback), (r'<[a-zA-Z0-9:._-]*>', Name.Tag), (r'<[^(<>\s")]+', Name.Tag, 'tag'), (r'([^(\^{^")\s]+)', Text), ], 'string': [ (r'[^(\^")]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'"', String, '#pop'), ], 'string2': [ (r'[^(\^{^})]+', String), (escape_re, String.Escape), (r'[\(|\)]+', String), (r'\^.', String.Escape), (r'{', String, '#push'), (r'}', String, '#pop'), ], 'stringFile': [ (r'[^(\^")]+', Name.Decorator), (escape_re, Name.Decorator), (r'\^.', Name.Decorator), (r'"', Name.Decorator, '#pop'), ], 'char': [ (escape_re + '"', String.Char, '#pop'), (r'\^."', String.Char, '#pop'), (r'."', String.Char, '#pop'), ], 'tag': [ (escape_re, Name.Tag), (r'"', Name.Tag, 'tagString'), (r'[^(<>\r\n")]+', Name.Tag), (r'>', Name.Tag, '#pop'), ], 'tagString': [ (r'[^(\^")]+', Name.Tag), (escape_re, Name.Tag), (r'[\(|\)]+', Name.Tag), (r'\^.', Name.Tag), (r'"', Name.Tag, '#pop'), ], 'tuple': [ (r'(\d+\.)+', Keyword.Constant), (r'\d+', Keyword.Constant, '#pop'), ], 'bin2': [ (r'\s+', Number.Hex), (r'([0-1]\s*){8}', Number.Hex), (r'}', Number.Hex, '#pop'), ], 'comment': [ (r'"', Comment, 'commentString1'), (r'{', Comment, 'commentString2'), (r'\[', Comment, 'commentBlock'), (r'[^(\s{\"\[]+', Comment, '#pop'), ], 'commentString1': [ (r'[^(\^")]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', 
Comment), (r'"', Comment, '#pop'), ], 'commentString2': [ (r'[^(\^{^})]+', Comment), (escape_re, Comment), (r'[\(|\)]+', Comment), (r'\^.', Comment), (r'{', Comment, '#push'), (r'}', Comment, '#pop'), ], 'commentBlock': [ (r'\[', Comment, '#push'), (r'\]', Comment, '#pop'), (r'[^(\[\])]+', Comment), ], } class ABAPLexer(RegexLexer): """ Lexer for ABAP, SAP's integrated language. *New in Pygments 1.1.* """ name = 'ABAP' aliases = ['abap'] filenames = ['*.abap'] mimetypes = ['text/x-abap'] flags = re.IGNORECASE | re.MULTILINE tokens = { 'common': [ (r'\s+', Text), (r'^\*.*$', Comment.Single), (r'\".*?\n', Comment.Single), ], 'variable-names': [ (r'<[\S_]+>', Name.Variable), (r'\w[\w~]*(?:(\[\])|->\*)?', Name.Variable), ], 'root': [ include('common'), #function calls (r'(CALL\s+(?:BADI|CUSTOMER-FUNCTION|FUNCTION))(\s+)(\'?\S+\'?)', bygroups(Keyword, Text, Name.Function)), (r'(CALL\s+(?:DIALOG|SCREEN|SUBSCREEN|SELECTION-SCREEN|' r'TRANSACTION|TRANSFORMATION))\b', Keyword), (r'(FORM|PERFORM)(\s+)(\w+)', bygroups(Keyword, Text, Name.Function)), (r'(PERFORM)(\s+)(\()(\w+)(\))', bygroups(Keyword, Text, Punctuation, Name.Variable, Punctuation )), (r'(MODULE)(\s+)(\S+)(\s+)(INPUT|OUTPUT)', bygroups(Keyword, Text, Name.Function, Text, Keyword)), # method implementation (r'(METHOD)(\s+)([\w~]+)', bygroups(Keyword, Text, Name.Function)), # method calls (r'(\s+)([\w\-]+)([=\-]>)([\w\-~]+)', bygroups(Text, Name.Variable, Operator, Name.Function)), # call methodnames returning style (r'(?<=(=|-)>)([\w\-~]+)(?=\()', Name.Function), # keywords with dashes in them. # these need to be first, because for instance the -ID part # of MESSAGE-ID wouldn't get highlighted if MESSAGE was # first in the list of keywords. 
(r'(ADD-CORRESPONDING|AUTHORITY-CHECK|' r'CLASS-DATA|CLASS-EVENTS|CLASS-METHODS|CLASS-POOL|' r'DELETE-ADJACENT|DIVIDE-CORRESPONDING|' r'EDITOR-CALL|ENHANCEMENT-POINT|ENHANCEMENT-SECTION|EXIT-COMMAND|' r'FIELD-GROUPS|FIELD-SYMBOLS|FUNCTION-POOL|' r'INTERFACE-POOL|INVERTED-DATE|' r'LOAD-OF-PROGRAM|LOG-POINT|' r'MESSAGE-ID|MOVE-CORRESPONDING|MULTIPLY-CORRESPONDING|' r'NEW-LINE|NEW-PAGE|NEW-SECTION|NO-EXTENSION|' r'OUTPUT-LENGTH|PRINT-CONTROL|' r'SELECT-OPTIONS|START-OF-SELECTION|SUBTRACT-CORRESPONDING|' r'SYNTAX-CHECK|SYSTEM-EXCEPTIONS|' r'TYPE-POOL|TYPE-POOLS' r')\b', Keyword), # keyword kombinations (r'CREATE\s+(PUBLIC|PRIVATE|DATA|OBJECT)|' r'((PUBLIC|PRIVATE|PROTECTED)\s+SECTION|' r'(TYPE|LIKE)(\s+(LINE\s+OF|REF\s+TO|' r'(SORTED|STANDARD|HASHED)\s+TABLE\s+OF))?|' r'FROM\s+(DATABASE|MEMORY)|CALL\s+METHOD|' r'(GROUP|ORDER) BY|HAVING|SEPARATED BY|' r'GET\s+(BADI|BIT|CURSOR|DATASET|LOCALE|PARAMETER|' r'PF-STATUS|(PROPERTY|REFERENCE)\s+OF|' r'RUN\s+TIME|TIME\s+(STAMP)?)?|' r'SET\s+(BIT|BLANK\s+LINES|COUNTRY|CURSOR|DATASET|EXTENDED\s+CHECK|' r'HANDLER|HOLD\s+DATA|LANGUAGE|LEFT\s+SCROLL-BOUNDARY|' r'LOCALE|MARGIN|PARAMETER|PF-STATUS|PROPERTY\s+OF|' r'RUN\s+TIME\s+(ANALYZER|CLOCK\s+RESOLUTION)|SCREEN|' r'TITLEBAR|UPADTE\s+TASK\s+LOCAL|USER-COMMAND)|' r'CONVERT\s+((INVERTED-)?DATE|TIME|TIME\s+STAMP|TEXT)|' r'(CLOSE|OPEN)\s+(DATASET|CURSOR)|' r'(TO|FROM)\s+(DATA BUFFER|INTERNAL TABLE|MEMORY ID|' r'DATABASE|SHARED\s+(MEMORY|BUFFER))|' r'DESCRIBE\s+(DISTANCE\s+BETWEEN|FIELD|LIST|TABLE)|' r'FREE\s(MEMORY|OBJECT)?|' r'PROCESS\s+(BEFORE\s+OUTPUT|AFTER\s+INPUT|' r'ON\s+(VALUE-REQUEST|HELP-REQUEST))|' r'AT\s+(LINE-SELECTION|USER-COMMAND|END\s+OF|NEW)|' r'AT\s+SELECTION-SCREEN(\s+(ON(\s+(BLOCK|(HELP|VALUE)-REQUEST\s+FOR|' r'END\s+OF|RADIOBUTTON\s+GROUP))?|OUTPUT))?|' r'SELECTION-SCREEN:?\s+((BEGIN|END)\s+OF\s+((TABBED\s+)?BLOCK|LINE|' r'SCREEN)|COMMENT|FUNCTION\s+KEY|' r'INCLUDE\s+BLOCKS|POSITION|PUSHBUTTON|' r'SKIP|ULINE)|' r'LEAVE\s+(LIST-PROCESSING|PROGRAM|SCREEN|' r'TO 
LIST-PROCESSING|TO TRANSACTION)' r'(ENDING|STARTING)\s+AT|' r'FORMAT\s+(COLOR|INTENSIFIED|INVERSE|HOTSPOT|INPUT|FRAMES|RESET)|' r'AS\s+(CHECKBOX|SUBSCREEN|WINDOW)|' r'WITH\s+(((NON-)?UNIQUE)?\s+KEY|FRAME)|' r'(BEGIN|END)\s+OF|' r'DELETE(\s+ADJACENT\s+DUPLICATES\sFROM)?|' r'COMPARING(\s+ALL\s+FIELDS)?|' r'INSERT(\s+INITIAL\s+LINE\s+INTO|\s+LINES\s+OF)?|' r'IN\s+((BYTE|CHARACTER)\s+MODE|PROGRAM)|' r'END-OF-(DEFINITION|PAGE|SELECTION)|' r'WITH\s+FRAME(\s+TITLE)|' # simple kombinations r'AND\s+(MARK|RETURN)|CLIENT\s+SPECIFIED|CORRESPONDING\s+FIELDS\s+OF|' r'IF\s+FOUND|FOR\s+EVENT|INHERITING\s+FROM|LEAVE\s+TO\s+SCREEN|' r'LOOP\s+AT\s+(SCREEN)?|LOWER\s+CASE|MATCHCODE\s+OBJECT|MODIF\s+ID|' r'MODIFY\s+SCREEN|NESTING\s+LEVEL|NO\s+INTERVALS|OF\s+STRUCTURE|' r'RADIOBUTTON\s+GROUP|RANGE\s+OF|REF\s+TO|SUPPRESS DIALOG|' r'TABLE\s+OF|UPPER\s+CASE|TRANSPORTING\s+NO\s+FIELDS|' r'VALUE\s+CHECK|VISIBLE\s+LENGTH|HEADER\s+LINE)\b', Keyword), # single word keywords. (r'(^|(?<=(\s|\.)))(ABBREVIATED|ADD|ALIASES|APPEND|ASSERT|' r'ASSIGN(ING)?|AT(\s+FIRST)?|' r'BACK|BLOCK|BREAK-POINT|' r'CASE|CATCH|CHANGING|CHECK|CLASS|CLEAR|COLLECT|COLOR|COMMIT|' r'CREATE|COMMUNICATION|COMPONENTS?|COMPUTE|CONCATENATE|CONDENSE|' r'CONSTANTS|CONTEXTS|CONTINUE|CONTROLS|' r'DATA|DECIMALS|DEFAULT|DEFINE|DEFINITION|DEFERRED|DEMAND|' r'DETAIL|DIRECTORY|DIVIDE|DO|' r'ELSE(IF)?|ENDAT|ENDCASE|ENDCLASS|ENDDO|ENDFORM|ENDFUNCTION|' r'ENDIF|ENDLOOP|ENDMETHOD|ENDMODULE|ENDSELECT|ENDTRY|' r'ENHANCEMENT|EVENTS|EXCEPTIONS|EXIT|EXPORT|EXPORTING|EXTRACT|' r'FETCH|FIELDS?|FIND|FOR|FORM|FORMAT|FREE|FROM|' r'HIDE|' r'ID|IF|IMPORT|IMPLEMENTATION|IMPORTING|IN|INCLUDE|INCLUDING|' r'INDEX|INFOTYPES|INITIALIZATION|INTERFACE|INTERFACES|INTO|' r'LENGTH|LINES|LOAD|LOCAL|' r'JOIN|' r'KEY|' r'MAXIMUM|MESSAGE|METHOD[S]?|MINIMUM|MODULE|MODIFY|MOVE|MULTIPLY|' r'NODES|' r'OBLIGATORY|OF|OFF|ON|OVERLAY|' r'PACK|PARAMETERS|PERCENTAGE|POSITION|PROGRAM|PROVIDE|PUBLIC|PUT|' r'RAISE|RAISING|RANGES|READ|RECEIVE|REFRESH|REJECT|REPORT|RESERVE|' 
r'RESUME|RETRY|RETURN|RETURNING|RIGHT|ROLLBACK|' r'SCROLL|SEARCH|SELECT|SHIFT|SINGLE|SKIP|SORT|SPLIT|STATICS|STOP|' r'SUBMIT|SUBTRACT|SUM|SUMMARY|SUMMING|SUPPLY|' r'TABLE|TABLES|TIMES|TITLE|TO|TOP-OF-PAGE|TRANSFER|TRANSLATE|TRY|TYPES|' r'ULINE|UNDER|UNPACK|UPDATE|USING|' r'VALUE|VALUES|VIA|' r'WAIT|WHEN|WHERE|WHILE|WITH|WINDOW|WRITE)\b', Keyword), # builtins (r'(abs|acos|asin|atan|' r'boolc|boolx|bit_set|' r'char_off|charlen|ceil|cmax|cmin|condense|contains|' r'contains_any_of|contains_any_not_of|concat_lines_of|cos|cosh|' r'count|count_any_of|count_any_not_of|' r'dbmaxlen|distance|' r'escape|exp|' r'find|find_end|find_any_of|find_any_not_of|floor|frac|from_mixed|' r'insert|' r'lines|log|log10|' r'match|matches|' r'nmax|nmin|numofchar|' r'repeat|replace|rescale|reverse|round|' r'segment|shift_left|shift_right|sign|sin|sinh|sqrt|strlen|' r'substring|substring_after|substring_from|substring_before|substring_to|' r'tan|tanh|to_upper|to_lower|to_mixed|translate|trunc|' r'xstrlen)(\()\b', bygroups(Name.Builtin, Punctuation)), (r'&[0-9]', Name), (r'[0-9]+', Number.Integer), # operators which look like variable names before # parsing variable names. (r'(?<=(\s|.))(AND|EQ|NE|GT|LT|GE|LE|CO|CN|CA|NA|CS|NOT|NS|CP|NP|' r'BYTE-CO|BYTE-CN|BYTE-CA|BYTE-NA|BYTE-CS|BYTE-NS|' r'IS\s+(NOT\s+)?(INITIAL|ASSIGNED|REQUESTED|BOUND))\b', Operator), include('variable-names'), # standard oparators after variable names, # because < and > are part of field symbols. (r'[?*<>=\-+]', Operator), (r"'(''|[^'])*'", String.Single), (r'[/;:()\[\],\.]', Punctuation) ], } class NewspeakLexer(RegexLexer): """ For `Newspeak <http://newspeaklanguage.org/>` syntax. 
""" name = 'Newspeak' filenames = ['*.ns2'] aliases = ['newspeak', ] mimetypes = ['text/x-newspeak'] tokens = { 'root' : [ (r'\b(Newsqueak2)\b',Keyword.Declaration), (r"'[^']*'",String), (r'\b(class)(\s+)([a-zA-Z0-9_]+)(\s*)', bygroups(Keyword.Declaration,Text,Name.Class,Text)), (r'\b(mixin|self|super|private|public|protected|nil|true|false)\b', Keyword), (r'([a-zA-Z0-9_]+\:)(\s*)([a-zA-Z_]\w+)', bygroups(Name.Function,Text,Name.Variable)), (r'([a-zA-Z0-9_]+)(\s*)(=)', bygroups(Name.Attribute,Text,Operator)), (r'<[a-zA-Z0-9_]+>', Comment.Special), include('expressionstat'), include('whitespace') ], 'expressionstat': [ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'\d+', Number.Integer), (r':\w+',Name.Variable), (r'(\w+)(::)', bygroups(Name.Variable, Operator)), (r'\w+:', Name.Function), (r'\w+', Name.Variable), (r'\(|\)', Punctuation), (r'\[|\]', Punctuation), (r'\{|\}', Punctuation), (r'(\^|\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-|:)', Operator), (r'\.|;', Punctuation), include('whitespace'), include('literals'), ], 'literals': [ (r'\$.', String), (r"'[^']*'", String), (r"#'[^']*'", String.Symbol), (r"#\w+:?", String.Symbol), (r"#(\+|\/|~|\*|<|>|=|@|%|\||&|\?|!|,|-)+", String.Symbol) ], 'whitespace' : [ (r'\s+', Text), (r'"[^"]*"', Comment) ] } class GherkinLexer(RegexLexer): """ For `Gherkin <http://github.com/aslakhellesoy/gherkin/>` syntax. 
*New in Pygments 1.2.* """ name = 'Gherkin' aliases = ['Cucumber', 'cucumber', 'Gherkin', 'gherkin'] filenames = ['*.feature'] mimetypes = ['text/x-gherkin'] feature_keywords = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$' feature_element_keywords = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went 
down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$' examples_keywords = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$' step_keywords = ur'^(\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors 
|Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )' tokens = { 'comments': [ (r'#.*$', Comment), ], 'feature_elements' : [ (step_keywords, Keyword, "step_content_stack"), include('comments'), (r"(\s|.)", Name.Function), ], 'feature_elements_on_stack' : [ (step_keywords, Keyword, "#pop:2"), include('comments'), (r"(\s|.)", Name.Function), ], 'examples_table': [ (r"\s+\|", Keyword, 'examples_table_header'), include('comments'), (r"(\s|.)", Name.Function), ], 'examples_table_header': [ (r"\s+\|\s*$", Keyword, "#pop:2"), include('comments'), (r"\s*\|", Keyword), (r"[^\|]", Name.Variable), ], 'scenario_sections_on_stack': [ (feature_element_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), "feature_elements_on_stack"), ], 'narrative': [ include('scenario_sections_on_stack'), (r"(\s|.)", Name.Function), ], 'table_vars': [ (r'(<[^>]+>)', Name.Variable), ], 'numbers': [ (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', String), ], 'string': [ include('table_vars'), (r'(\s|.)', String), ], 'py_string': [ (r'"""', Keyword, "#pop"), include('string'), ], 'step_content_root':[ (r"$", Keyword, "#pop"), include('step_content'), ], 'step_content_stack':[ (r"$", Keyword, "#pop:2"), include('step_content'), ], 'step_content':[ (r'"', Name.Function, "double_string"), include('table_vars'), include('numbers'), include('comments'), (r'(\s|.)', Name.Function), ], 'table_content': [ (r"\s+\|\s*$", Keyword, "#pop"), include('comments'), (r"\s*\|", Keyword), include('string'), ], 'double_string': [ (r'"', Name.Function, "#pop"), include('string'), ], 'root': [ (r'\n', Name.Function), include('comments'), (r'"""', Keyword, "py_string"), (r'\s+\|', Keyword, 'table_content'), (r'"', Name.Function, "double_string"), include('table_vars'), include('numbers'), (r'(\s*)(@[^@\r\n\t ]+)', bygroups(Name.Function, Name.Tag)), (step_keywords, bygroups(Name.Function, Keyword), 'step_content_root'), (feature_keywords, bygroups(Keyword, Keyword, Name.Function), 'narrative'), 
(feature_element_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), 'feature_elements'), (examples_keywords, bygroups(Name.Function, Keyword, Keyword, Name.Function), 'examples_table'), (r'(\s|.)', Name.Function), ] } class AsymptoteLexer(RegexLexer): """ For `Asymptote <http://asymptote.sf.net/>`_ source code. *New in Pygments 1.2.* """ name = 'Asymptote' aliases = ['asy', 'asymptote'] filenames = ['*.asy'] mimetypes = ['text/x-asymptote'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' tokens = { 'whitespace': [ (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'//(\n|(.|\n)*?[^\\]\n)', Comment), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment), ], 'statements': [ # simple string (TeX friendly) (r'"(\\\\|\\"|[^"])*"', String), # C style string (with character escapes) (r"'", String, 'string'), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex), (r'0[0-7]+[Ll]?', Number.Oct), (r'\d+[Ll]?', Number.Integer), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.]', Punctuation), (r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)), (r'(and|controls|tension|atleast|curl|if|else|while|for|do|' r'return|break|continue|struct|typedef|new|access|import|' r'unravel|from|include|quote|static|public|private|restricted|' r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword), # Since an asy-type-name can be also an asy-function-name, # in the following we test if the string " [a-zA-Z]" follows # the Keyword.Type. # Of course it is not perfect ! 
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|' r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|' r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|' r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|' r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|' r'path3|pen|picture|point|position|projection|real|revolution|' r'scaleT|scientific|segment|side|slice|splitface|string|surface|' r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|' r'transformation|tree|triangle|trilinear|triple|vector|' r'vertex|void)(?=([ ]{1,}[a-zA-Z]))', Keyword.Type), # Now the asy-type-name which are not asy-function-name # except yours ! # Perhaps useless (r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|' r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|' r'picture|position|real|revolution|slice|splitface|ticksgridT|' r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type), ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'root': [ include('whitespace'), # functions (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')({)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation), 'function'), # function declarations (r'((?:[a-zA-Z0-9_*\s])+?(?:\s|\*))' # return arguments r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name r'(\s*\([^;]*?\))' # signature r'(' + _ws + r')(;)', bygroups(using(this), Name.Function, using(this), using(this), Punctuation)), ('', Text, 'statement'), ], 'statement' : [ include('whitespace'), include('statements'), ('[{}]', Punctuation), (';', Punctuation, '#pop'), ], 'function': [ include('whitespace'), include('statements'), (';', Punctuation), ('{', Punctuation, '#push'), ('}', Punctuation, '#pop'), ], 'string': [ (r"'", String, '#pop'), (r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), 
(r'\n', String), (r"[^\\'\n]+", String), # all other characters (r'\\\n', String), (r'\\n', String), # line continuation (r'\\', String), # stray backslash ] } def get_tokens_unprocessed(self, text): from pygments.lexers._asybuiltins import ASYFUNCNAME, ASYVARNAME for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name and value in ASYFUNCNAME: token = Name.Function elif token is Name and value in ASYVARNAME: token = Name.Variable yield index, token, value class PostScriptLexer(RegexLexer): """ Lexer for PostScript files. The PostScript Language Reference published by Adobe at <http://partners.adobe.com/public/developer/en/ps/PLRM.pdf> is the authority for this. *New in Pygments 1.4.* """ name = 'PostScript' aliases = ['postscript', 'postscr'] filenames = ['*.ps', '*.eps'] mimetypes = ['application/postscript'] delimiter = r'\(\)\<\>\[\]\{\}\/\%\s' delimiter_end = r'(?=[%s])' % delimiter valid_name_chars = r'[^%s]' % delimiter valid_name = r"%s+%s" % (valid_name_chars, delimiter_end) tokens = { 'root': [ # All comment types (r'^%!.+\n', Comment.Preproc), (r'%%.*\n', Comment.Special), (r'(^%.*\n){2,}', Comment.Multiline), (r'%.*\n', Comment.Single), # String literals are awkward; enter separate state. (r'\(', String, 'stringliteral'), (r'[\{\}(\<\<)(\>\>)\[\]]', Punctuation), # Numbers (r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex), # Slight abuse: use Oct to signify any explicit base system (r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)' r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct), (r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?' 
+ delimiter_end, Number.Float), (r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer), # References (r'\/%s' % valid_name, Name.Variable), # Names (valid_name, Name.Function), # Anything else is executed # These keywords taken from # <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf> # Is there an authoritative list anywhere that doesn't involve # trawling documentation? (r'(false|true)' + delimiter_end, Keyword.Constant), # Conditionals / flow control (r'(eq|ne|ge|gt|le|lt|and|or|not|if|ifelse|for|forall)' + delimiter_end, Keyword.Reserved), ('(abs|add|aload|arc|arcn|array|atan|begin|bind|ceiling|charpath|' 'clip|closepath|concat|concatmatrix|copy|cos|currentlinewidth|' 'currentmatrix|currentpoint|curveto|cvi|cvs|def|defaultmatrix|' 'dict|dictstackoverflow|div|dtransform|dup|end|exch|exec|exit|exp|' 'fill|findfont|floor|get|getinterval|grestore|gsave|gt|' 'identmatrix|idiv|idtransform|index|invertmatrix|itransform|' 'length|lineto|ln|load|log|loop|matrix|mod|moveto|mul|neg|newpath|' 'pathforall|pathbbox|pop|print|pstack|put|quit|rand|rangecheck|' 'rcurveto|repeat|restore|rlineto|rmoveto|roll|rotate|round|run|' 'save|scale|scalefont|setdash|setfont|setgray|setlinecap|' 'setlinejoin|setlinewidth|setmatrix|setrgbcolor|shfill|show|' 'showpage|sin|sqrt|stack|stringwidth|stroke|strokepath|sub|' 'syntaxerror|transform|translate|truncate|typecheck|undefined|' 'undefinedfilename|undefinedresult)' + delimiter_end, Name.Builtin), (r'\s+', Text), ], 'stringliteral': [ (r'[^\(\)\\]+', String), (r'\\', String.Escape, 'escape'), (r'\(', String, '#push'), (r'\)', String, '#pop'), ], 'escape': [ (r'([0-8]{3}|n|r|t|b|f|\\|\(|\))?', String.Escape, '#pop'), ], } class AutohotkeyLexer(RegexLexer): """ For `autohotkey <http://www.autohotkey.com/>`_ source code. 
*New in Pygments 1.4.* """ name = 'autohotkey' aliases = ['ahk', 'autohotkey'] filenames = ['*.ahk', '*.ahkl'] mimetypes = ['text/x-autohotkey'] tokens = { 'root': [ (r'^(\s*)(/\*)', bygroups(Text, Comment.Multiline), 'incomment'), (r'^(\s*)(\()', bygroups(Text, Generic), 'incontinuation'), (r'\s+;.*?$', Comment.Singleline), (r'^;.*?$', Comment.Singleline), (r'[]{}(),;[]', Punctuation), (r'(in|is|and|or|not)\b', Operator.Word), (r'\%[a-zA-Z_#@$][a-zA-Z0-9_#@$]*\%', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInVariables'), (r'"', String, combined('stringescape', 'dqs')), include('numbers'), (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), (r'\\|\'', Text), (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), include('garbage'), ], 'incomment': [ (r'^\s*\*/', Comment.Multiline, '#pop'), (r'[^*/]', Comment.Multiline), (r'[*/]', Comment.Multiline) ], 'incontinuation': [ (r'^\s*\)', Generic, '#pop'), (r'[^)]', Generic), (r'[)]', Generic), ], 'commands': [ (r'(?i)^(\s*)(global|local|static|' r'#AllowSameLineComments|#ClipboardTimeout|#CommentFlag|' r'#ErrorStdOut|#EscapeChar|#HotkeyInterval|#HotkeyModifierTimeout|' r'#Hotstring|#IfWinActive|#IfWinExist|#IfWinNotActive|' r'#IfWinNotExist|#IncludeAgain|#Include|#InstallKeybdHook|' r'#InstallMouseHook|#KeyHistory|#LTrim|#MaxHotkeysPerInterval|' r'#MaxMem|#MaxThreads|#MaxThreadsBuffer|#MaxThreadsPerHotkey|' r'#NoEnv|#NoTrayIcon|#Persistent|#SingleInstance|#UseHook|' r'#WinActivateForce|AutoTrim|BlockInput|Break|Click|ClipWait|' r'Continue|Control|ControlClick|ControlFocus|ControlGetFocus|' r'ControlGetPos|ControlGetText|ControlGet|ControlMove|ControlSend|' r'ControlSendRaw|ControlSetText|CoordMode|Critical|' r'DetectHiddenText|DetectHiddenWindows|Drive|DriveGet|' r'DriveSpaceFree|Edit|Else|EnvAdd|EnvDiv|EnvGet|EnvMult|EnvSet|' r'EnvSub|EnvUpdate|Exit|ExitApp|FileAppend|' r'FileCopy|FileCopyDir|FileCreateDir|FileCreateShortcut|' 
r'FileDelete|FileGetAttrib|FileGetShortcut|FileGetSize|' r'FileGetTime|FileGetVersion|FileInstall|FileMove|FileMoveDir|' r'FileRead|FileReadLine|FileRecycle|FileRecycleEmpty|' r'FileRemoveDir|FileSelectFile|FileSelectFolder|FileSetAttrib|' r'FileSetTime|FormatTime|GetKeyState|Gosub|Goto|GroupActivate|' r'GroupAdd|GroupClose|GroupDeactivate|Gui|GuiControl|' r'GuiControlGet|Hotkey|IfEqual|IfExist|IfGreaterOrEqual|IfGreater|' r'IfInString|IfLess|IfLessOrEqual|IfMsgBox|IfNotEqual|IfNotExist|' r'IfNotInString|IfWinActive|IfWinExist|IfWinNotActive|' r'IfWinNotExist|If |ImageSearch|IniDelete|IniRead|IniWrite|' r'InputBox|Input|KeyHistory|KeyWait|ListHotkeys|ListLines|' r'ListVars|Loop|Menu|MouseClickDrag|MouseClick|MouseGetPos|' r'MouseMove|MsgBox|OnExit|OutputDebug|Pause|PixelGetColor|' r'PixelSearch|PostMessage|Process|Progress|Random|RegDelete|' r'RegRead|RegWrite|Reload|Repeat|Return|RunAs|RunWait|Run|' r'SendEvent|SendInput|SendMessage|SendMode|SendPlay|SendRaw|Send|' r'SetBatchLines|SetCapslockState|SetControlDelay|' r'SetDefaultMouseSpeed|SetEnv|SetFormat|SetKeyDelay|' r'SetMouseDelay|SetNumlockState|SetScrollLockState|' r'SetStoreCapslockMode|SetTimer|SetTitleMatchMode|' r'SetWinDelay|SetWorkingDir|Shutdown|Sleep|Sort|SoundBeep|' r'SoundGet|SoundGetWaveVolume|SoundPlay|SoundSet|' r'SoundSetWaveVolume|SplashImage|SplashTextOff|SplashTextOn|' r'SplitPath|StatusBarGetText|StatusBarWait|StringCaseSense|' r'StringGetPos|StringLeft|StringLen|StringLower|StringMid|' r'StringReplace|StringRight|StringSplit|StringTrimLeft|' r'StringTrimRight|StringUpper|Suspend|SysGet|Thread|ToolTip|' r'Transform|TrayTip|URLDownloadToFile|While|WinActivate|' r'WinActivateBottom|WinClose|WinGetActiveStats|WinGetActiveTitle|' r'WinGetClass|WinGetPos|WinGetText|WinGetTitle|WinGet|WinHide|' r'WinKill|WinMaximize|WinMenuSelectItem|WinMinimizeAllUndo|' r'WinMinimizeAll|WinMinimize|WinMove|WinRestore|WinSetTitle|' r'WinSet|WinShow|WinWaitActive|WinWaitClose|WinWaitNotActive|' r'WinWait)\b', 
bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(Abs|ACos|Asc|ASin|ATan|Ceil|Chr|Cos|DllCall|Exp|FileExist|' r'Floor|GetKeyState|IL_Add|IL_Create|IL_Destroy|InStr|IsFunc|' r'IsLabel|Ln|Log|LV_Add|LV_Delete|LV_DeleteCol|LV_GetCount|' r'LV_GetNext|LV_GetText|LV_Insert|LV_InsertCol|LV_Modify|' r'LV_ModifyCol|LV_SetImageList|Mod|NumGet|NumPut|OnMessage|' r'RegExMatch|RegExReplace|RegisterCallback|Round|SB_SetIcon|' r'SB_SetParts|SB_SetText|Sin|Sqrt|StrLen|SubStr|Tan|TV_Add|' r'TV_Delete|TV_GetChild|TV_GetCount|TV_GetNext|TV_Get|' r'TV_GetParent|TV_GetPrev|TV_GetSelection|TV_GetText|TV_Modify|' r'VarSetCapacity|WinActive|WinExist|Object|ComObjActive|' r'ComObjArray|ComObjEnwrap|ComObjUnwrap|ComObjParameter|' r'ComObjType|ComObjConnect|ComObjCreate|ComObjGet|ComObjError|' r'ComObjValue|Insert|MinIndex|MaxIndex|Remove|SetCapacity|' r'GetCapacity|GetAddress|_NewEnum|FileOpen|Read|Write|ReadLine|' r'WriteLine|ReadNumType|WriteNumType|RawRead|RawWrite|Seek|Tell|' r'Close|Next|IsObject|StrPut|StrGet|Trim|LTrim|RTrim)\b', Name.Function), ], 'builtInVariables': [ (r'(?i)(A_AhkPath|A_AhkVersion|A_AppData|A_AppDataCommon|' r'A_AutoTrim|A_BatchLines|A_CaretX|A_CaretY|A_ComputerName|' r'A_ControlDelay|A_Cursor|A_DDDD|A_DDD|A_DD|A_DefaultMouseSpeed|' r'A_Desktop|A_DesktopCommon|A_DetectHiddenText|' r'A_DetectHiddenWindows|A_EndChar|A_EventInfo|A_ExitReason|' r'A_FormatFloat|A_FormatInteger|A_Gui|A_GuiEvent|A_GuiControl|' r'A_GuiControlEvent|A_GuiHeight|A_GuiWidth|A_GuiX|A_GuiY|A_Hour|' r'A_IconFile|A_IconHidden|A_IconNumber|A_IconTip|A_Index|' r'A_IPAddress1|A_IPAddress2|A_IPAddress3|A_IPAddress4|A_ISAdmin|' r'A_IsCompiled|A_IsCritical|A_IsPaused|A_IsSuspended|A_KeyDelay|' r'A_Language|A_LastError|A_LineFile|A_LineNumber|A_LoopField|' r'A_LoopFileAttrib|A_LoopFileDir|A_LoopFileExt|A_LoopFileFullPath|' r'A_LoopFileLongPath|A_LoopFileName|A_LoopFileShortName|' r'A_LoopFileShortPath|A_LoopFileSize|A_LoopFileSizeKB|' 
r'A_LoopFileSizeMB|A_LoopFileTimeAccessed|A_LoopFileTimeCreated|' r'A_LoopFileTimeModified|A_LoopReadLine|A_LoopRegKey|' r'A_LoopRegName|A_LoopRegSubkey|A_LoopRegTimeModified|' r'A_LoopRegType|A_MDAY|A_Min|A_MM|A_MMM|A_MMMM|A_Mon|A_MouseDelay|' r'A_MSec|A_MyDocuments|A_Now|A_NowUTC|A_NumBatchLines|A_OSType|' r'A_OSVersion|A_PriorHotkey|A_ProgramFiles|A_Programs|' r'A_ProgramsCommon|A_ScreenHeight|A_ScreenWidth|A_ScriptDir|' r'A_ScriptFullPath|A_ScriptName|A_Sec|A_Space|A_StartMenu|' r'A_StartMenuCommon|A_Startup|A_StartupCommon|A_StringCaseSense|' r'A_Tab|A_Temp|A_ThisFunc|A_ThisHotkey|A_ThisLabel|A_ThisMenu|' r'A_ThisMenuItem|A_ThisMenuItemPos|A_TickCount|A_TimeIdle|' r'A_TimeIdlePhysical|A_TimeSincePriorHotkey|A_TimeSinceThisHotkey|' r'A_TitleMatchMode|A_TitleMatchModeSpeed|A_UserName|A_WDay|' r'A_WinDelay|A_WinDir|A_WorkingDir|A_YDay|A_YEAR|A_YWeek|A_YYYY|' r'Clipboard|ClipboardAll|ComSpec|ErrorLevel|ProgramFiles|True|' r'False|A_IsUnicode|A_FileEncoding|A_OSVersion|A_PtrSize)\b', Name.Variable), ], 'labels': [ # hotkeys and labels # technically, hotkey names are limited to named keys and buttons (r'(^\s*)([^:\s\(\"]+?:{1,2})', bygroups(Text, Name.Label)), (r'(^\s*)(::[^:\s]+?::)', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'garbage': [ (r'[^\S\n]', Text), # (r'.', Text), # no cheating ], } class MaqlLexer(RegexLexer): """ Lexer for `GoodData MAQL <https://secure.gooddata.com/docs/html/advanced.metric.tutorial.html>`_ scripts. 
*New in Pygments 1.4.* """ name = 'MAQL' aliases = ['maql'] filenames = ['*.maql'] mimetypes = ['text/x-gooddata-maql','application/x-gooddata-maql'] flags = re.IGNORECASE tokens = { 'root': [ # IDENTITY (r'IDENTIFIER\b', Name.Builtin), # IDENTIFIER (r'\{[^}]+\}', Name.Variable), # NUMBER (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), # STRING (r'"', Literal.String, 'string-literal'), # RELATION (r'\<\>|\!\=', Operator), (r'\=|\>\=|\>|\<\=|\<', Operator), # := (r'\:\=', Operator), # OBJECT (r'\[[^]]+\]', Name.Variable.Class), # keywords (r'(DIMENSIONS?|BOTTOM|METRIC|COUNT|OTHER|FACT|WITH|TOP|OR|' r'ATTRIBUTE|CREATE|PARENT|FALSE|ROWS?|FROM|ALL|AS|PF|' r'COLUMNS?|DEFINE|REPORT|LIMIT|TABLE|LIKE|AND|BY|' r'BETWEEN|EXCEPT|SELECT|MATCH|WHERE|TRUE|FOR|IN|' r'WITHOUT|FILTER|ALIAS|ORDER|FACT|WHEN|NOT|ON|' r'KEYS|KEY|FULLSET|PRIMARY|LABELS|LABEL|VISUAL|' r'TITLE|DESCRIPTION|FOLDER|ALTER|DROP|ADD|DATASET|' r'DATATYPE|INT|BIGINT|DOUBLE|DATE|VARCHAR|DECIMAL|' r'SYNCHRONIZE|TYPE|DEFAULT|ORDER|ASC|DESC|HYPERLINK|' r'INCLUDE|TEMPLATE|MODIFY)\b', Keyword), # FUNCNAME (r'[a-zA-Z]\w*\b', Name.Function), # Comments (r'#.*', Comment.Single), # Punctuation (r'[,;\(\)]', Token.Punctuation), # Space is not significant (r'\s+', Text) ], 'string-literal': [ (r'\\[tnrfbae"\\]', String.Escape), (r'"', Literal.String, '#pop'), (r'[^\\"]+', Literal.String) ], } class GoodDataCLLexer(RegexLexer): """ Lexer for `GoodData-CL <http://github.com/gooddata/GoodData-CL/raw/master/cli/src/main/resources/com/gooddata/processor/COMMANDS.txt>`_ script files. 
*New in Pygments 1.4.* """ name = 'GoodData-CL' aliases = ['gooddata-cl'] filenames = ['*.gdc'] mimetypes = ['text/x-gooddata-cl'] flags = re.IGNORECASE tokens = { 'root': [ # Comments (r'#.*', Comment.Single), # Function call (r'[a-zA-Z]\w*', Name.Function), # Argument list (r'\(', Token.Punctuation, 'args-list'), # Punctuation (r';', Token.Punctuation), # Space is not significant (r'\s+', Text) ], 'args-list': [ (r'\)', Token.Punctuation, '#pop'), (r',', Token.Punctuation), (r'[a-zA-Z]\w*', Name.Variable), (r'=', Operator), (r'"', Literal.String, 'string-literal'), (r'[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]{1,3})?', Literal.Number), # Space is not significant (r'\s', Text) ], 'string-literal': [ (r'\\[tnrfbae"\\]', String.Escape), (r'"', Literal.String, '#pop'), (r'[^\\"]+', Literal.String) ] } class ProtoBufLexer(RegexLexer): """ Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_ definition files. *New in Pygments 1.4.* """ name = 'Protocol Buffer' aliases = ['protobuf', 'proto'] filenames = ['*.proto'] tokens = { 'root': [ (r'[ \t]+', Text), (r'[,;{}\[\]\(\)]', Punctuation), (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), (r'\b(import|option|optional|required|repeated|default|packed|' r'ctype|extensions|to|max|rpc|returns)\b', Keyword), (r'(int32|int64|uint32|uint64|sint32|sint64|' r'fixed32|fixed64|sfixed32|sfixed64|' r'float|double|bool|string|bytes)\b', Keyword.Type), (r'(true|false)\b', Keyword.Constant), (r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'), (r'(message|extend)(\s+)', bygroups(Keyword.Declaration, Text), 'message'), (r'(enum|group|service)(\s+)', bygroups(Keyword.Declaration, Text), 'type'), (r'\".*\"', String), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'(\-?(inf|nan))', Number.Float), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'0[0-7]+[LlUu]*', Number.Oct), (r'\d+[LlUu]*', Number.Integer), 
(r'[+-=]', Operator), (r'([a-zA-Z_][a-zA-Z0-9_\.]*)([ \t]*)(=)', bygroups(Name.Attribute, Text, Operator)), ('[a-zA-Z_][a-zA-Z0-9_\.]*', Name), ], 'package': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Namespace, '#pop') ], 'message': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'type': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name, '#pop') ], } class HybrisLexer(RegexLexer): """ For `Hybris <http://www.hybris-lang.org>`_ source code. *New in Pygments 1.4.* """ name = 'Hybris' aliases = ['hybris', 'hy'] filenames = ['*.hy', '*.hyb'] mimetypes = ['text/x-hybris', 'application/x-hybris'] flags = re.MULTILINE | re.DOTALL tokens = { 'root': [ # method names (r'^(\s*(?:function|method|operator\s+)+?)' r'([a-zA-Z_][a-zA-Z0-9_]*)' r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)), (r'[^\S\n]+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'@[a-zA-Z_][a-zA-Z0-9_\.]*', Name.Decorator), (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|' r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword), (r'(extends|private|protected|public|static|throws|function|method|' r'operator)\b', Keyword.Declaration), (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|' r'__INC_PATH__)\b', Keyword.Constant), (r'(class|struct)(\s+)', bygroups(Keyword.Declaration, Text), 'class'), (r'(import|include)(\s+)', bygroups(Keyword.Namespace, Text), 'import'), (r'(gc_collect|gc_mm_items|gc_mm_usage|gc_collect_threshold|' r'urlencode|urldecode|base64encode|base64decode|sha1|crc32|sha2|' r'md5|md5_file|acos|asin|atan|atan2|ceil|cos|cosh|exp|fabs|floor|' r'fmod|log|log10|pow|sin|sinh|sqrt|tan|tanh|isint|isfloat|ischar|' r'isstring|isarray|ismap|isalias|typeof|sizeof|toint|tostring|' r'fromxml|toxml|binary|pack|load|eval|var_names|var_values|' r'user_functions|dyn_functions|methods|call|call_method|mknod|' r'mkfifo|mount|umount2|umount|ticks|usleep|sleep|time|strtime|' r'strdate|dllopen|dlllink|dllcall|dllcall_argv|dllclose|env|exec|' 
r'fork|getpid|wait|popen|pclose|exit|kill|pthread_create|' r'pthread_create_argv|pthread_exit|pthread_join|pthread_kill|' r'smtp_send|http_get|http_post|http_download|socket|bind|listen|' r'accept|getsockname|getpeername|settimeout|connect|server|recv|' r'send|close|print|println|printf|input|readline|serial_open|' r'serial_fcntl|serial_get_attr|serial_get_ispeed|serial_get_ospeed|' r'serial_set_attr|serial_set_ispeed|serial_set_ospeed|serial_write|' r'serial_read|serial_close|xml_load|xml_parse|fopen|fseek|ftell|' r'fsize|fread|fwrite|fgets|fclose|file|readdir|pcre_replace|size|' r'pop|unmap|has|keys|values|length|find|substr|replace|split|trim|' r'remove|contains|join)\b', Name.Builtin), (r'(MethodReference|Runner|Dll|Thread|Pipe|Process|Runnable|' r'CGI|ClientSocket|Socket|ServerSocket|File|Console|Directory|' r'Exception)\b', Keyword.Type), (r'"(\\\\|\\"|[^"])*"', String), (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char), (r'(\.)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(Operator, Name.Attribute)), (r'[a-zA-Z_][a-zA-Z0-9_]*:', Name.Label), (r'[a-zA-Z_\$][a-zA-Z0-9_]*', Name), (r'[~\^\*!%&\[\]\(\)\{\}<>\|+=:;,./?\-@]+', Operator), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-f]+', Number.Hex), (r'[0-9]+L?', Number.Integer), (r'\n', Text), ], 'class': [ (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'import': [ (r'[a-zA-Z0-9_.]+\*?', Name.Namespace, '#pop') ], } class AwkLexer(RegexLexer): """ For Awk scripts. 
*New in Pygments 1.5.* """ name = 'Awk' aliases = ['awk', 'gawk', 'mawk', 'nawk'] filenames = ['*.awk'] mimetypes = ['application/x-awk'] tokens = { 'commentsandwhitespace': [ (r'\s+', Text), (r'#.*$', Comment.Single) ], 'slashstartsregex': [ include('commentsandwhitespace'), (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/' r'\B', String.Regex, '#pop'), (r'(?=/)', Text, ('#pop', 'badregex')), (r'', Text, '#pop') ], 'badregex': [ (r'\n', Text, '#pop') ], 'root': [ (r'^(?=\s|/)', Text, 'slashstartsregex'), include('commentsandwhitespace'), (r'\+\+|--|\|\||&&|in|\$|!?~|' r'(\*\*|[-<>+*%\^/!=])=?', Operator, 'slashstartsregex'), (r'[{(\[;,]', Punctuation, 'slashstartsregex'), (r'[})\].]', Punctuation), (r'(break|continue|do|while|exit|for|if|' r'return)\b', Keyword, 'slashstartsregex'), (r'function\b', Keyword.Declaration, 'slashstartsregex'), (r'(atan2|cos|exp|int|log|rand|sin|sqrt|srand|gensub|gsub|index|' r'length|match|split|sprintf|sub|substr|tolower|toupper|close|' r'fflush|getline|next|nextfile|print|printf|strftime|systime|' r'delete|system)\b', Keyword.Reserved), (r'(ARGC|ARGIND|ARGV|CONVFMT|ENVIRON|ERRNO|FIELDWIDTHS|FILENAME|FNR|FS|' r'IGNORECASE|NF|NR|OFMT|OFS|ORFS|RLENGTH|RS|RSTART|RT|' r'SUBSEP)\b', Name.Builtin), (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'0x[0-9a-fA-F]+', Number.Hex), (r'[0-9]+', Number.Integer), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), ] } class Cfengine3Lexer(RegexLexer): """ Lexer for `CFEngine3 <http://cfengine.org>`_ policy files. 
*New in Pygments 1.5.* """ name = 'CFEngine3' aliases = ['cfengine3', 'cf3'] filenames = ['*.cf'] mimetypes = [] tokens = { 'root': [ (r'#.*?\n', Comment), (r'(body)(\s+)(\S+)(\s+)(control)', bygroups(Keyword, Text, Keyword, Text, Keyword)), (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()', bygroups(Keyword, Text, Keyword, Text, Name.Function, Punctuation), 'arglist'), (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)', bygroups(Keyword, Text, Keyword, Text, Name.Function)), (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)', bygroups(Punctuation,Name.Variable,Punctuation, Text,Keyword.Type,Text,Operator,Text)), (r'(\S+)(\s*)(=>)(\s*)', bygroups(Keyword.Reserved,Text,Operator,Text)), (r'"', String, 'string'), (r'(\w+)(\()', bygroups(Name.Function, Punctuation)), (r'([\w.!&|\(\)]+)(::)', bygroups(Name.Class, Punctuation)), (r'(\w+)(:)', bygroups(Keyword.Declaration,Punctuation)), (r'@[\{\(][^\)\}]+[\}\)]', Name.Variable), (r'[(){},;]', Punctuation), (r'=>', Operator), (r'->', Operator), (r'\d+\.\d+', Number.Float), (r'\d+', Number.Integer), (r'\w+', Name.Function), (r'\s+', Text), ], 'string': [ (r'\$[\{\(]', String.Interpol, 'interpol'), (r'\\.', String.Escape), (r'"', String, '#pop'), (r'\n', String), (r'.', String), ], 'interpol': [ (r'\$[\{\(]', String.Interpol, '#push'), (r'[\}\)]', String.Interpol, '#pop'), (r'[^\$\{\(\)\}]+', String.Interpol), ], 'arglist': [ (r'\)', Punctuation, '#pop'), (r',', Punctuation), (r'\w+', Name.Variable), (r'\s+', Text), ], } class SnobolLexer(RegexLexer): """ Lexer for the SNOBOL4 programming language. Recognizes the common ASCII equivalents of the original SNOBOL4 operators. Does not require spaces around binary operators. *New in Pygments 1.5.* """ name = "Snobol" aliases = ["snobol"] filenames = ['*.snobol'] mimetypes = ['text/x-snobol'] tokens = { # root state, start of line # comments, continuation lines, and directives start in column 1 # as do labels 'root': [ (r'\*.*\n', Comment), (r'[\+\.] 
', Punctuation, 'statement'), (r'-.*\n', Comment), (r'END\s*\n', Name.Label, 'heredoc'), (r'[A-Za-z\$][\w$]*', Name.Label, 'statement'), (r'\s+', Text, 'statement'), ], # statement state, line after continuation or label 'statement': [ (r'\s*\n', Text, '#pop'), (r'\s+', Text), (r'(?<=[^\w.])(LT|LE|EQ|NE|GE|GT|INTEGER|IDENT|DIFFER|LGT|SIZE|' r'REPLACE|TRIM|DUPL|REMDR|DATE|TIME|EVAL|APPLY|OPSYN|LOAD|UNLOAD|' r'LEN|SPAN|BREAK|ANY|NOTANY|TAB|RTAB|REM|POS|RPOS|FAIL|FENCE|' r'ABORT|ARB|ARBNO|BAL|SUCCEED|INPUT|OUTPUT|TERMINAL)(?=[^\w.])', Name.Builtin), (r'[A-Za-z][\w\.]*', Name), # ASCII equivalents of original operators # | for the EBCDIC equivalent, ! likewise # \ for EBCDIC negation (r'\*\*|[\?\$\.!%\*/#+\-@\|&\\=]', Operator), (r'"[^"]*"', String), (r"'[^']*'", String), # Accept SPITBOL syntax for real numbers # as well as Macro SNOBOL4 (r'[0-9]+(?=[^\.EeDd])', Number.Integer), (r'[0-9]+(\.[0-9]*)?([EDed][-+]?[0-9]+)?', Number.Float), # Goto (r':', Punctuation, 'goto'), (r'[\(\)<>,;]', Punctuation), ], # Goto block 'goto': [ (r'\s*\n', Text, "#pop:2"), (r'\s+', Text), (r'F|S', Keyword), (r'(\()([A-Za-z][\w.]*)(\))', bygroups(Punctuation, Name.Label, Punctuation)) ], # everything after the END statement is basically one # big heredoc. 'heredoc': [ (r'.*\n', String.Heredoc) ] } class UrbiscriptLexer(ExtendedRegexLexer): """ For UrbiScript source code. 
    *New in Pygments 1.5.*
    """

    name = 'UrbiScript'
    aliases = ['urbiscript']
    filenames = ['*.u']
    mimetypes = ['application/x-urbiscript']

    flags = re.DOTALL

    ## TODO
    # - handle Experimental and deprecated tags with specific tokens
    # - handle Angles and Durations with specific tokens

    def blob_callback(lexer, match, ctx):
        """Tokenize a ``\\B(<size>)(<data>)`` binary blob inside a string.

        Groups: (1) text before the blob, (2) the ``\\B(<size>)(`` opener,
        (3) the decimal size.  If the data is not followed by a closing
        ``)`` at the declared size, the opener is re-emitted as plain
        String text; otherwise the whole blob is emitted as String.Escape
        and the context position is advanced past it.
        """
        text_before_blob = match.group(1)
        blob_start = match.group(2)
        blob_size_str = match.group(3)
        blob_size = int(blob_size_str)
        yield match.start(), String, text_before_blob
        ctx.pos += len(text_before_blob)

        # if blob size doesn't match blob format (example : "\B(2)(aaa)")
        # yield blob as a string
        if ctx.text[match.end() + blob_size] != ")":
            result = "\\B(" + blob_size_str + ")("
            yield match.start(), String, result
            ctx.pos += len(result)
            return

        # if blob is well formatted, yield as Escape
        blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")"
        yield match.start(), String.Escape, blob_text
        ctx.pos = match.end() + blob_size + 1 # +1 is the ending ")"

    tokens = {
        'root': [
            (r'\s+', Text),
            # comments
            (r'//.*?\n', Comment),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'(?:every|for|loop|while)(?:;|&|\||,)',Keyword),
            (r'(?:assert|at|break|case|catch|closure|compl|continue|'
             r'default|else|enum|every|external|finally|for|freezeif|if|new|'
             r'onleave|return|stopif|switch|this|throw|timeout|try|'
             r'waituntil|whenever|while)\b', Keyword),
            # C++ keywords reserved by urbiscript but not usable.
            (r'(?:asm|auto|bool|char|const_cast|delete|double|dynamic_cast|'
             r'explicit|export|extern|float|friend|goto|inline|int|'
             r'long|mutable|namespace|register|reinterpret_cast|short|'
             r'signed|sizeof|static_cast|struct|template|typedef|typeid|'
             r'typename|union|unsigned|using|virtual|volatile|'
             r'wchar_t)\b', Keyword.Reserved),
            # deprecated keywords, use a meaningful token when available
            (r'(?:emit|foreach|internal|loopn|static)\b', Keyword),
            # ignored keywords, use a meaningful token when available
            (r'(?:private|protected|public)\b', Keyword),
            (r'(?:var|do|const|function|class)\b', Keyword.Declaration),
(r'(?:true|false|nil|void)\b', Keyword.Constant), (r'(?:Barrier|Binary|Boolean|CallMessage|Channel|Code|' r'Comparable|Container|Control|Date|Dictionary|Directory|' r'Duration|Enumeration|Event|Exception|Executable|File|Finalizable|' r'Float|FormatInfo|Formatter|Global|Group|Hash|InputStream|' r'IoService|Job|Kernel|Lazy|List|Loadable|Lobby|Location|Logger|Math|' r'Mutex|nil|Object|Orderable|OutputStream|Pair|Path|Pattern|Position|' r'Primitive|Process|Profile|PseudoLazy|PubSub|RangeIterable|Regexp|' r'Semaphore|Server|Singleton|Socket|StackFrame|Stream|String|System|' r'Tag|Timeout|Traceable|TrajectoryGenerator|Triplet|Tuple' r'|UObject|UValue|UVar)\b', Name.Builtin), (r'(?:this)\b', Name.Builtin.Pseudo), # don't match single | and & (r'(?:[-=+*%/<>~^:]+|\.&?|\|\||&&)', Operator), (r'(?:and_eq|and|bitand|bitor|in|not|not_eq|or_eq|or|xor_eq|xor)\b', Operator.Word), (r'[{}\[\]()]+', Punctuation), (r'(?:;|\||,|&|\?|!)+', Punctuation), (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other), (r'0x[0-9a-fA-F]+', Number.Hex), # Float, Integer, Angle and Duration (r'(?:[0-9]+(?:(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?)?' r'((?:rad|deg|grad)|(?:ms|s|min|h|d))?)\b', Number.Float), # handle binary blob in strings (r'"', String.Double, "string.double"), (r"'", String.Single, "string.single"), ], 'string.double': [ (r'((?:\\\\|\\"|[^"])*?)(\\B\((\d+)\)\()', blob_callback), (r'(\\\\|\\"|[^"])*?"', String.Double, '#pop'), ], 'string.single': [ (r"((?:\\\\|\\'|[^'])*?)(\\B\((\d+)\)\()", blob_callback), (r"(\\\\|\\'|[^'])*?'", String.Single, '#pop'), ], # from http://pygments.org/docs/lexerdevelopment/#changing-states 'comment': [ (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ] } class OpenEdgeLexer(RegexLexer): """ Lexer for `OpenEdge ABL (formerly Progress) <http://web.progress.com/en/openedge/abl.html>`_ source code. 
*New in Pygments 1.5.* """ name = 'OpenEdge ABL' aliases = ['openedge', 'abl', 'progress'] filenames = ['*.p', '*.cls'] mimetypes = ['text/x-openedge', 'application/x-openedge'] types = (r'(?i)(^|(?<=[^0-9a-z_\-]))(CHARACTER|CHAR|CHARA|CHARAC|CHARACT|CHARACTE|' r'COM-HANDLE|DATE|DATETIME|DATETIME-TZ|' r'DECIMAL|DEC|DECI|DECIM|DECIMA|HANDLE|' r'INT64|INTEGER|INT|INTE|INTEG|INTEGE|' r'LOGICAL|LONGCHAR|MEMPTR|RAW|RECID|ROWID)\s*($|(?=[^0-9a-z_\-]))') keywords = (r'(?i)(^|(?<=[^0-9a-z_\-]))(' + r'|'.join(OPENEDGEKEYWORDS) + r')\s*($|(?=[^0-9a-z_\-]))') tokens = { 'root': [ (r'/\*', Comment.Multiline, 'comment'), (r'\{', Comment.Preproc, 'preprocessor'), (r'\s*&.*', Comment.Preproc), (r'0[xX][0-9a-fA-F]+[LlUu]*', Number.Hex), (r'(?i)(DEFINE|DEF|DEFI|DEFIN)\b', Keyword.Declaration), (types, Keyword.Type), (keywords, Name.Builtin), (r'"(\\\\|\\"|[^"])*"', String.Double), (r"'(\\\\|\\'|[^'])*'", String.Single), (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), (r'[0-9]+', Number.Integer), (r'\s+', Text), (r'[+*/=-]', Operator), (r'[.:()]', Punctuation), (r'.', Name.Variable), # Lazy catch-all ], 'comment': [ (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline) ], 'preprocessor': [ (r'[^{}]', Comment.Preproc), (r'{', Comment.Preproc, '#push'), (r'}', Comment.Preproc, '#pop'), ], } class BroLexer(RegexLexer): """ For `Bro <http://bro-ids.org/>`_ scripts. *New in Pygments 1.5.* """ name = 'Bro' aliases = ['bro'] filenames = ['*.bro'] _hex = r'[0-9a-fA-F_]+' _float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?' 
    # Regex fragment for one hostname label (reused to build FQDN matches).
    _h = r'[A-Za-z0-9][-A-Za-z0-9]*'

    tokens = {
        'root': [
            # Whitespace
            (r'^@.*?\n', Comment.Preproc),
            (r'#.*?\n', Comment.Single),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text),
            # Keywords
            (r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
             r'|export|for|function|if|global|hook|local|module|next'
             r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
            (r'(addr|any|bool|count|counter|double|file|int|interval|net'
             r'|pattern|port|record|set|string|subnet|table|time|timer'
             r'|vector)\b', Keyword.Type),
            (r'(T|F)\b', Keyword.Constant),
            # `&attribute` suffixes, e.g. `&redef`, `&create_expire`.
            (r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
             r'|default|disable_print_hook|raw_output|encrypt|group|log'
             r'|mergeable|optional|persistent|priority|redef'
             r'|rotate_(?:interval|size)|synchronized)\b',
             bygroups(Punctuation, Keyword)),
            (r'\s+module\b', Keyword.Namespace),
            # Addresses, ports and networks
            (r'\d+/(tcp|udp|icmp|unknown)\b', Number),
            (r'(\d+\.){3}\d+', Number),
            (r'(' + _hex + r'){7}' + _hex, Number),
            (r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
            (r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
            (r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
            # Hostnames
            (_h + r'(\.' + _h + r')+', String),
            # Numeric
            (_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
            (r'0[xX]' + _hex, Number.Hex),
            (_float, Number.Float),
            (r'\d+', Number.Integer),
            (r'/', String.Regex, 'regex'),
            (r'"', String, 'string'),
            # Operators
            (r'[!%*/+:<=>?~|-]', Operator),
            (r'([-+=&|]{2}|[+=!><-]=)', Operator),
            (r'(in|match)\b', Operator.Word),
            (r'[{}()\[\]$.,;]', Punctuation),
            # Identifier
            (r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name)
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),
            (r'\\\n', String),
            (r'\\', String)
        ],
        'regex': [
            (r'/', String.Regex, '#pop'),
            (r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
            (r'[^\\/\n]+', String.Regex),
            (r'\\\n', String.Regex),
            (r'\\', String.Regex)
        ]
    }


class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.

    *New in Pygments 1.6.*
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']

    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Text),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            # A string: closing quote is optional on CBM BASIC lines.
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[\(\),:;]', Punctuation),
            (r'\w+[$%]?', Name),
        ]
    }

    def analyse_text(self, text):
        # if it starts with a line number, it shouldn't be a "modern" Basic
        # like VB.net
        # NOTE(review): returns True (treated as 1.0) on a leading line
        # number and falls through to an implicit None otherwise; the
        # analyse_text contract expects a float in 0.0..1.0 — confirm the
        # framework tolerates True/None here before relying on it.
        if re.match(r'\d+', text):
            return True


class MscgenLexer(RegexLexer):
    """
    For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
*New in Pygments 1.6.* """ name = 'Mscgen' aliases = ['mscgen', 'msc'] filenames = ['*.msc'] _var = r'([a-zA-Z0-9_]+|"(?:\\"|[^"])*")' tokens = { 'root': [ (r'msc\b', Keyword.Type), # Options (r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS' r'|arcgradient|ARCGRADIENT)\b', Name.Property), # Operators (r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word), (r'(\.|-|\|){3}', Keyword), (r'(?:-|=|\.|:){2}' r'|<<=>>|<->|<=>|<<>>|<:>' r'|->|=>>|>>|=>|:>|-x|-X' r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator), # Names (r'\*', Name.Builtin), (_var, Name.Variable), # Other (r'\[', Punctuation, 'attrs'), (r'\{|\}|,|;', Punctuation), include('comments') ], 'attrs': [ (r'\]', Punctuation, '#pop'), (_var + r'(\s*)(=)(\s*)' + _var, bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace, String)), (r',', Punctuation), include('comments') ], 'comments': [ (r'(?://|#).*?\n', Comment.Single), (r'/\*(?:.|\n)*?\*/', Comment.Multiline), (r'[ \t\r\n]+', Text.Whitespace) ] } def _rx_indent(level): # Kconfig *always* interprets a tab as 8 spaces, so this is the default. # Edit this if you are in an environment where KconfigLexer gets expanded # input (tabs expanded to spaces) and the expansion tab width is != 8, # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width). # Value range here is 2 <= {tab_width} <= 8. tab_width = 8 # Regex matching a given indentation {level}, assuming that indentation is # a multiple of {tab_width}. In other cases there might be problems. return r'(?:\t| {1,%s}\t| {%s}){%s}.*\n' % (tab_width-1, tab_width, level) class KconfigLexer(RegexLexer): """ For Linux-style Kconfig files. 
*New in Pygments 1.6.* """ name = 'Kconfig' aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config'] # Adjust this if new kconfig file names appear in your environment filenames = ['Kconfig', '*Config.in*', 'external.in*', 'standard-modules.in'] mimetypes = ['text/x-kconfig'] # No re.MULTILINE, indentation-aware help text needs line-by-line handling flags = 0 def call_indent(level): # If indentation >= {level} is detected, enter state 'indent{level}' return (_rx_indent(level), String.Doc, 'indent%s' % level) def do_indent(level): # Print paragraphs of indentation level >= {level} as String.Doc, # ignoring blank lines. Then return to 'root' state. return [ (_rx_indent(level), String.Doc), (r'\s*\n', Text), (r'', Generic, '#pop:2') ] tokens = { 'root': [ (r'\s+', Text), (r'#.*?\n', Comment.Single), (r'(mainmenu|config|menuconfig|choice|endchoice|comment|menu|' r'endmenu|visible if|if|endif|source|prompt|select|depends on|' r'default|range|option)\b', Keyword), (r'(---help---|help)[\t ]*\n', Keyword, 'help'), (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b', Name.Builtin), (r'[!=&|]', Operator), (r'[()]', Punctuation), (r'[0-9]+', Number.Integer), (r"'(''|[^'])*'", String.Single), (r'"(""|[^"])*"', String.Double), (r'\S+', Text), ], # Help text is indented, multi-line and ends when a lower indentation # level is detected. 'help': [ # Skip blank lines after help token, if any (r'\s*\n', Text), # Determine the first help line's indentation level heuristically(!). # Attention: this is not perfect, but works for 99% of "normal" # indentation schemes up to a max. indentation level of 7. 
call_indent(7), call_indent(6), call_indent(5), call_indent(4), call_indent(3), call_indent(2), call_indent(1), ('', Text, '#pop'), # for incomplete help sections without text ], # Handle text for indentation levels 7 to 1 'indent7': do_indent(7), 'indent6': do_indent(6), 'indent5': do_indent(5), 'indent4': do_indent(4), 'indent3': do_indent(3), 'indent2': do_indent(2), 'indent1': do_indent(1), } class VGLLexer(RegexLexer): """ For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_ source code. *New in Pygments 1.6.* """ name = 'VGL' aliases = ['vgl'] filenames = ['*.rpf'] flags = re.MULTILINE | re.DOTALL | re.IGNORECASE tokens = { 'root': [ (r'\{[^\}]*\}', Comment.Multiline), (r'declare', Keyword.Constant), (r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object' r'|create|on|line|with|global|routine|value|endroutine|constant' r'|global|set|join|library|compile_option|file|exists|create|copy' r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])', Keyword), (r'(true|false|null|empty|error|locked)', Keyword.Constant), (r'[~\^\*\#!%&\[\]\(\)<>\|+=:;,./?-]', Operator), (r'"[^"]*"', String), (r'(\.)([a-z_\$][a-z0-9_\$]*)', bygroups(Operator, Name.Attribute)), (r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number), (r'[a-z_\$][a-z0-9_\$]*', Name), (r'[\r\n]+', Text), (r'\s+', Text) ] } class SourcePawnLexer(RegexLexer): """ For SourcePawn source code with preprocessor directives. 
*New in Pygments 1.6.* """ name = 'SourcePawn' aliases = ['sp'] filenames = ['*.sp'] mimetypes = ['text/x-sourcepawn'] #: optional Comment or Whitespace _ws = r'(?:\s|//.*?\n|/\*.*?\*/)+' tokens = { 'root': [ # preprocessor directives: without whitespace ('^#if\s+0', Comment.Preproc, 'if0'), ('^#', Comment.Preproc, 'macro'), # or with whitespace ('^' + _ws + r'#if\s+0', Comment.Preproc, 'if0'), ('^' + _ws + '#', Comment.Preproc, 'macro'), (r'\n', Text), (r'\s+', Text), (r'\\\n', Text), # line continuation (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single), (r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline), (r'[{}]', Punctuation), (r'L?"', String, 'string'), (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char), (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float), (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float), (r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex), (r'0[0-7]+[LlUu]*', Number.Oct), (r'\d+[LlUu]*', Number.Integer), (r'\*/', Error), (r'[~!%^&*+=|?:<>/-]', Operator), (r'[()\[\],.;]', Punctuation), (r'(case|const|continue|native|' r'default|else|enum|for|if|new|operator|' r'public|return|sizeof|static|decl|struct|switch)\b', Keyword), (r'(bool|Float)\b', Keyword.Type), (r'(true|false)\b', Keyword.Constant), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), ], 'string': [ (r'"', String, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), (r'[^\\"\n]+', String), # all other characters (r'\\\n', String), # line continuation (r'\\', String), # stray backslash ], 'macro': [ (r'[^/\n]+', Comment.Preproc), (r'/\*(.|\n)*?\*/', Comment.Multiline), (r'//.*?\n', Comment.Single, '#pop'), (r'/', Comment.Preproc), (r'(?<=\\)\n', Comment.Preproc), (r'\n', Comment.Preproc, '#pop'), ], 'if0': [ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'), (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'), (r'.*?\n', Comment), ] } SM_TYPES = ['Action', 'bool', 'Float', 'Plugin', 'String', 'any', 'AdminFlag', 'OverrideType', 'OverrideRule', 'ImmunityType', 
'GroupId', 'AdminId', 'AdmAccessMode', 'AdminCachePart', 'CookieAccess', 'CookieMenu', 'CookieMenuAction', 'NetFlow', 'ConVarBounds', 'QueryCookie', 'ReplySource', 'ConVarQueryResult', 'ConVarQueryFinished', 'Function', 'Action', 'Identity', 'PluginStatus', 'PluginInfo', 'DBResult', 'DBBindType', 'DBPriority', 'PropType', 'PropFieldType', 'MoveType', 'RenderMode', 'RenderFx', 'EventHookMode', 'EventHook', 'FileType', 'FileTimeMode', 'PathType', 'ParamType', 'ExecType', 'DialogType', 'Handle', 'KvDataTypes', 'NominateResult', 'MapChange', 'MenuStyle', 'MenuAction', 'MenuSource', 'RegexError', 'SDKCallType', 'SDKLibrary', 'SDKFuncConfSource', 'SDKType', 'SDKPassMethod', 'RayType', 'TraceEntityFilter', 'ListenOverride', 'SortOrder', 'SortType', 'SortFunc2D', 'APLRes', 'FeatureType', 'FeatureStatus', 'SMCResult', 'SMCError', 'TFClassType', 'TFTeam', 'TFCond', 'TFResourceType', 'Timer', 'TopMenuAction', 'TopMenuObjectType', 'TopMenuPosition', 'TopMenuObject', 'UserMsg'] def __init__(self, **options): self.smhighlighting = get_bool_opt(options, 'sourcemod', True) self._functions = [] if self.smhighlighting: from pygments.lexers._sourcemodbuiltins import FUNCTIONS self._functions.extend(FUNCTIONS) RegexLexer.__init__(self, **options) def get_tokens_unprocessed(self, text): for index, token, value in \ RegexLexer.get_tokens_unprocessed(self, text): if token is Name: if self.smhighlighting: if value in self.SM_TYPES: token = Keyword.Type elif value in self._functions: token = Name.Builtin yield index, token, value class PuppetLexer(RegexLexer): """ For `Puppet <http://puppetlabs.com/>`__ configuration DSL. 
*New in Pygments 1.6.* """ name = 'Puppet' aliases = ['puppet'] filenames = ['*.pp'] tokens = { 'root': [ include('comments'), include('keywords'), include('names'), include('numbers'), include('operators'), include('strings'), (r'[]{}:(),;[]', Punctuation), (r'[^\S\n]+', Text), ], 'comments': [ (r'\s*#.*$', Comment), (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline), ], 'operators': [ (r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator), (r'(in|and|or|not)\b', Operator.Word), ], 'names': [ ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute), (r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation, String, Punctuation)), (r'\$\S+', Name.Variable), ], 'numbers': [ # Copypasta from the Python lexer (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), (r'\d+[eE][+-]?[0-9]+j?', Number.Float), (r'0[0-7]+j?', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+j?', Number.Integer) ], 'keywords': [ # Left out 'group' and 'require' # Since they're often used as attributes (r'(?i)(absent|alert|alias|audit|augeas|before|case|check|class|' r'computer|configured|contained|create_resources|crit|cron|debug|' r'default|define|defined|directory|else|elsif|emerg|err|exec|' r'extlookup|fail|false|file|filebucket|fqdn_rand|generate|host|if|' r'import|include|info|inherits|inline_template|installed|' r'interface|k5login|latest|link|loglevel|macauthorization|' r'mailalias|maillist|mcx|md5|mount|mounted|nagios_command|' r'nagios_contact|nagios_contactgroup|nagios_host|' r'nagios_hostdependency|nagios_hostescalation|nagios_hostextinfo|' r'nagios_hostgroup|nagios_service|nagios_servicedependency|' r'nagios_serviceescalation|nagios_serviceextinfo|' r'nagios_servicegroup|nagios_timeperiod|node|noop|notice|notify|' r'package|present|purged|realize|regsubst|resources|role|router|' r'running|schedule|scheduled_task|search|selboolean|selmodule|' r'service|sha1|shellquote|split|sprintf|ssh_authorized_key|sshkey|' 
r'stage|stopped|subscribe|tag|tagged|template|tidy|true|undef|' r'unmounted|user|versioncmp|vlan|warning|yumrepo|zfs|zone|' r'zpool)\b', Keyword), ], 'strings': [ (r'"([^"])*"', String), (r'\'([^\'])*\'', String), ], } class NSISLexer(RegexLexer): """ For `NSIS <http://nsis.sourceforge.net/>`_ scripts. *New in Pygments 1.6.* """ name = 'NSIS' aliases = ['nsis', 'nsi', 'nsh'] filenames = ['*.nsi', '*.nsh'] mimetypes = ['text/x-nsis'] flags = re.IGNORECASE tokens = { 'root': [ (r'[;\#].*\n', Comment), (r"'.*?'", String.Single), (r'"', String.Double, 'str_double'), (r'`', String.Backtick, 'str_backtick'), include('macro'), include('interpol'), include('basic'), (r'\$\{[a-z_|][\w|]*\}', Keyword.Pseudo), (r'/[a-z_]\w*', Name.Attribute), ('.', Text), ], 'basic': [ (r'(\n)(Function)(\s+)([._a-z][.\w]*)\b', bygroups(Text, Keyword, Text, Name.Function)), (r'\b([_a-z]\w*)(::)([a-z][a-z0-9]*)\b', bygroups(Keyword.Namespace, Punctuation, Name.Function)), (r'\b([_a-z]\w*)(:)', bygroups(Name.Label, Punctuation)), (r'(\b[ULS]|\B)([\!\<\>=]?=|\<\>?|\>)\B', Operator), (r'[|+-]', Operator), (r'\\', Punctuation), (r'\b(Abort|Add(?:BrandingImage|Size)|' r'Allow(?:RootDirInstall|SkipFiles)|AutoCloseWindow|' r'BG(?:Font|Gradient)|BrandingText|BringToFront|Call(?:InstDLL)?|' r'(?:Sub)?Caption|ChangeUI|CheckBitmap|ClearErrors|CompletedText|' r'ComponentText|CopyFiles|CRCCheck|' r'Create(?:Directory|Font|Shortcut)|Delete(?:INI(?:Sec|Str)|' r'Reg(?:Key|Value))?|DetailPrint|DetailsButtonText|' r'Dir(?:Show|Text|Var|Verify)|(?:Disabled|Enabled)Bitmap|' r'EnableWindow|EnumReg(?:Key|Value)|Exch|Exec(?:Shell|Wait)?|' r'ExpandEnvStrings|File(?:BufSize|Close|ErrorText|Open|' r'Read(?:Byte)?|Seek|Write(?:Byte)?)?|' r'Find(?:Close|First|Next|Window)|FlushINI|Function(?:End)?|' r'Get(?:CurInstType|CurrentAddress|DlgItem|DLLVersion(?:Local)?|' r'ErrorLevel|FileTime(?:Local)?|FullPathName|FunctionAddress|' r'InstDirError|LabelAddress|TempFileName)|' r'Goto|HideWindow|Icon|' 
r'If(?:Abort|Errors|FileExists|RebootFlag|Silent)|' r'InitPluginsDir|Install(?:ButtonText|Colors|Dir(?:RegKey)?)|' r'Inst(?:ProgressFlags|Type(?:[GS]etText)?)|Int(?:CmpU?|Fmt|Op)|' r'IsWindow|LangString(?:UP)?|' r'License(?:BkColor|Data|ForceSelection|LangString|Text)|' r'LoadLanguageFile|LockWindow|Log(?:Set|Text)|MessageBox|' r'MiscButtonText|Name|Nop|OutFile|(?:Uninst)?Page(?:Ex(?:End)?)?|' r'PluginDir|Pop|Push|Quit|Read(?:(?:Env|INI|Reg)Str|RegDWORD)|' r'Reboot|(?:Un)?RegDLL|Rename|RequestExecutionLevel|ReserveFile|' r'Return|RMDir|SearchPath|Section(?:Divider|End|' r'(?:(?:Get|Set)(?:Flags|InstTypes|Size|Text))|Group(?:End)?|In)?|' r'SendMessage|Set(?:AutoClose|BrandingImage|Compress(?:ionLevel|' r'or(?:DictSize)?)?|CtlColors|CurInstType|DatablockOptimize|' r'DateSave|Details(?:Print|View)|Error(?:s|Level)|FileAttributes|' r'Font|OutPath|Overwrite|PluginUnload|RebootFlag|ShellVarContext|' r'Silent|StaticBkColor)|' r'Show(?:(?:I|Uni)nstDetails|Window)|Silent(?:Un)?Install|Sleep|' r'SpaceTexts|Str(?:CmpS?|Cpy|Len)|SubSection(?:End)?|' r'Uninstall(?:ButtonText|(?:Sub)?Caption|EXEName|Icon|Text)|' r'UninstPage|Var|VI(?:AddVersionKey|ProductVersion)|WindowIcon|' r'Write(?:INIStr|Reg(:?Bin|DWORD|(?:Expand)?Str)|Uninstaller)|' r'XPStyle)\b', Keyword), (r'\b(CUR|END|(?:FILE_ATTRIBUTE_)?' 
r'(?:ARCHIVE|HIDDEN|NORMAL|OFFLINE|READONLY|SYSTEM|TEMPORARY)|' r'HK(CC|CR|CU|DD|LM|PD|U)|' r'HKEY_(?:CLASSES_ROOT|CURRENT_(?:CONFIG|USER)|DYN_DATA|' r'LOCAL_MACHINE|PERFORMANCE_DATA|USERS)|' r'ID(?:ABORT|CANCEL|IGNORE|NO|OK|RETRY|YES)|' r'MB_(?:ABORTRETRYIGNORE|DEFBUTTON[1-4]|' r'ICON(?:EXCLAMATION|INFORMATION|QUESTION|STOP)|' r'OK(?:CANCEL)?|RETRYCANCEL|RIGHT|SETFOREGROUND|TOPMOST|USERICON|' r'YESNO(?:CANCEL)?)|SET|SHCTX|' r'SW_(?:HIDE|SHOW(?:MAXIMIZED|MINIMIZED|NORMAL))|' r'admin|all|auto|both|bottom|bzip2|checkbox|colored|current|false|' r'force|hide|highest|if(?:diff|newer)|lastused|leave|left|' r'listonly|lzma|nevershow|none|normal|off|on|pop|push|' r'radiobuttons|right|show|silent|silentlog|smooth|textonly|top|' r'true|try|user|zlib)\b', Name.Constant), ], 'macro': [ (r'\!(addincludedir(?:dir)?|addplugindir|appendfile|cd|define|' r'delfilefile|echo(?:message)?|else|endif|error|execute|' r'if(?:macro)?n?(?:def)?|include|insertmacro|macro(?:end)?|packhdr|' r'search(?:parse|replace)|system|tempfilesymbol|undef|verbose|' r'warning)\b', Comment.Preproc), ], 'interpol': [ (r'\$(R?[0-9])', Name.Builtin.Pseudo), # registers (r'\$(ADMINTOOLS|APPDATA|CDBURN_AREA|COOKIES|COMMONFILES(?:32|64)|' r'DESKTOP|DOCUMENTS|EXE(?:DIR|FILE|PATH)|FAVORITES|FONTS|HISTORY|' r'HWNDPARENT|INTERNET_CACHE|LOCALAPPDATA|MUSIC|NETHOOD|PICTURES|' r'PLUGINSDIR|PRINTHOOD|PROFILE|PROGRAMFILES(?:32|64)|QUICKLAUNCH|' r'RECENT|RESOURCES(?:_LOCALIZED)?|SENDTO|SM(?:PROGRAMS|STARTUP)|' r'STARTMENU|SYSDIR|TEMP(?:LATES)?|VIDEOS|WINDIR|\{NSISDIR\})', Name.Builtin), (r'\$(CMDLINE|INSTDIR|OUTDIR|LANGUAGE)', Name.Variable.Global), (r'\$[a-z_]\w*', Name.Variable), ], 'str_double': [ (r'"', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], 'str_backtick': [ (r'`', String, '#pop'), (r'\$(\\[nrt"]|\$)', String.Escape), include('interpol'), (r'.', String.Double), ], } class RPMSpecLexer(RegexLexer): """ For RPM *.spec files *New in Pygments 1.6.* """ name = 
'RPMSpec' aliases = ['spec'] filenames = ['*.spec'] mimetypes = ['text/x-rpm-spec'] _directives = ('(?:package|prep|build|install|clean|check|pre[a-z]*|' 'post[a-z]*|trigger[a-z]*|files)') tokens = { 'root': [ (r'#.*\n', Comment), include('basic'), ], 'description': [ (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'changelog': [ (r'\*.*\n', Generic.Subheading), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text), '#pop'), (r'\n', Text), (r'.', Text), ], 'string': [ (r'"', String.Double, '#pop'), (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape), include('interpol'), (r'.', String.Double), ], 'basic': [ include('macro'), (r'(?i)^(Name|Version|Release|Epoch|Summary|Group|License|Packager|' r'Vendor|Icon|URL|Distribution|Prefix|Patch[0-9]*|Source[0-9]*|' r'Requires\(?[a-z]*\)?|[a-z]+Req|Obsoletes|Suggests|Provides|Conflicts|' r'Build[a-z]+|[a-z]+Arch|Auto[a-z]+)(:)(.*)$', bygroups(Generic.Heading, Punctuation, using(this))), (r'^%description', Name.Decorator, 'description'), (r'^%changelog', Name.Decorator, 'changelog'), (r'^(%' + _directives + ')(.*)$', bygroups(Name.Decorator, Text)), (r'%(attr|defattr|dir|doc(?:dir)?|setup|config(?:ure)?|' r'make(?:install)|ghost|patch[0-9]+|find_lang|exclude|verify)', Keyword), include('interpol'), (r"'.*?'", String.Single), (r'"', String.Double, 'string'), (r'.', Text), ], 'macro': [ (r'%define.*\n', Comment.Preproc), (r'%\{\!\?.*%define.*\}', Comment.Preproc), (r'(%(?:if(?:n?arch)?|else(?:if)?|endif))(.*)$', bygroups(Comment.Preproc, Text)), ], 'interpol': [ (r'%\{?__[a-z_]+\}?', Name.Function), (r'%\{?_([a-z_]+dir|[a-z_]+path|prefix)\}?', Keyword.Pseudo), (r'%\{\?[A-Za-z0-9_]+\}', Name.Variable), (r'\$\{?RPM_[A-Z0-9_]+\}?', Name.Variable.Global), (r'%\{[a-zA-Z][a-zA-Z0-9_]+\}', Keyword.Constant), ] } class AutoItLexer(RegexLexer): """ For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files. 
AutoIt is a freeware BASIC-like scripting language designed for automating the Windows GUI and general scripting *New in Pygments 1.6.* """ name = 'AutoIt' aliases = ['autoit', 'Autoit'] filenames = ['*.au3'] mimetypes = ['text/x-autoit'] # Keywords, functions, macros from au3.keywords.properties # which can be found in AutoIt installed directory, e.g. # c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties keywords = """\ #include-once #include #endregion #forcedef #forceref #region and byref case continueloop dim do else elseif endfunc endif endselect exit exitloop for func global if local next not or return select step then to until wend while exit""".split() functions = """\ abs acos adlibregister adlibunregister asc ascw asin assign atan autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor blockinput break call cdtray ceiling chr chrw clipget clipput consoleread consolewrite consolewriteerror controlclick controlcommand controldisable controlenable controlfocus controlgetfocus controlgethandle controlgetpos controlgettext controlhide controllistview controlmove controlsend controlsettext controlshow controltreeview cos dec dircopy dircreate dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree drivespacetotal drivestatus envget envset envupdate eval execute exp filechangedir fileclose filecopy filecreatentfslink filecreateshortcut filedelete fileexists filefindfirstfile filefindnextfile fileflush filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut filegetshortname filegetsize filegettime filegetversion fileinstall filemove fileopen fileopendialog fileread 
filereadline filerecycle filerecycleempty filesavedialog fileselectfolder filesetattrib filesetpos filesettime filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon guictrlcreateinput guictrlcreatelabel guictrlcreatelist guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj guictrlcreatepic guictrlcreateprogress guictrlcreateradio guictrlcreateslider guictrlcreatetab guictrlcreatetabitem guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize inetread inidelete iniread inireadsection inireadsectionnames inirenamesection iniwrite iniwritesection inputbox int isadmin isarray isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox number objcreate objcreateinterface objevent objevent objget objname onautoitexitregister onautoitexitunregister opt ping pixelchecksum pixelgetcolor pixelsearch pluginclose pluginopen 
processclose processexists processgetstats processlist processsetpriority processwait processwaitclose progressoff progresson progressset ptr random regdelete regenumkey regenumval regread regwrite round run runas runaswait runwait send sendkeepactive seterror setextended shellexecute shellexecutewait shutdown sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread string stringaddcr stringcompare stringformat stringfromasciiarray stringinstr stringisalnum stringisalpha stringisascii stringisdigit stringisfloat stringisint stringislower stringisspace stringisupper stringisxdigit stringleft stringlen stringlower stringmid stringregexp stringregexpreplace stringreplace stringright stringsplit stringstripcr stringstripws stringtoasciiarray stringtobinary stringtrimleft stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent traysetpauseicon traysetstate traysettooltip traytip ubound udpbind udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype winactivate winactive winclose winexists winflash wingetcaretpos wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess wingetstate wingettext wingettitle winkill winlist winmenuselectitem winminimizeall winminimizeallundo winmove winsetontop winsetstate winsettitle winsettrans winwait winwaitactive winwaitclose winwaitnotactive""".split() macros = """\ @appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion @autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec @cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir @desktopheight @desktoprefresh @desktopwidth 
@documentscommondir @error @exitcode @exitmethod @extended @favoritescommondir @favoritesdir @gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid @gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour @ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf @logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang @mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype @osversion @programfilesdir @programscommondir @programsdir @scriptdir @scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir @startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide @sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault @sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna @sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir @tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday @windowsdir @workingdir @yday @year""".split() tokens = { 'root': [ (r';.*\n', Comment.Single), (r'(#comments-start|#cs).*?(#comments-end|#ce)', Comment.Multiline), (r'[\[\]{}(),;]', Punctuation), (r'(and|or|not)\b', Operator.Word), (r'[\$|@][a-zA-Z_][a-zA-Z0-9_]*', Name.Variable), (r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator), include('commands'), include('labels'), include('builtInFunctions'), include('builtInMarcros'), (r'"', String, combined('stringescape', 'dqs')), include('numbers'), (r'[a-zA-Z_#@$][a-zA-Z0-9_#@$]*', Name), (r'\\|\'', Text), (r'\`([\,\%\`abfnrtv\-\+;])', String.Escape), (r'_\n', Text), # Line continuation include('garbage'), ], 'commands': [ (r'(?i)(\s*)(%s)\b' % '|'.join(keywords), bygroups(Text, Name.Builtin)), ], 'builtInFunctions': [ (r'(?i)(%s)\b' % '|'.join(functions), Name.Function), ], 'builtInMarcros': [ (r'(?i)(%s)\b' % '|'.join(macros), Name.Variable.Global), ], 'labels': [ # sendkeys (r'(^\s*)({\S+?})', bygroups(Text, Name.Label)), ], 'numbers': [ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', 
Number.Float), (r'\d+[eE][+-]?[0-9]+', Number.Float), (r'0\d+', Number.Oct), (r'0[xX][a-fA-F0-9]+', Number.Hex), (r'\d+L', Number.Integer.Long), (r'\d+', Number.Integer) ], 'stringescape': [ (r'\"\"|\`([\,\%\`abfnrtv])', String.Escape), ], 'strings': [ (r'[^"\n]+', String), ], 'dqs': [ (r'"', String, '#pop'), include('strings') ], 'garbage': [ (r'[^\S\n]', Text), ], } class RexxLexer(RegexLexer): """ `Rexx <http://www.rexxinfo.org/>`_ is a scripting language available for a wide range of different platforms with its roots found on mainframe systems. It is popular for I/O- and data based tasks and can act as glue language to bind different applications together. *New in Pygments 1.7.* """ name = 'Rexx' aliases = ['rexx', 'ARexx', 'arexx'] filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx'] mimetypes = ['text/x-rexx'] flags = re.IGNORECASE tokens = { 'root': [ (r'\s', Whitespace), (r'/\*', Comment.Multiline, 'comment'), (r'"', String, 'string_double'), (r"'", String, 'string_single'), (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number), (r'([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b', bygroups(Name.Function, Whitespace, Operator, Whitespace, Keyword.Declaration)), (r'([a-z_][a-z0-9_]*)(\s*)(:)', bygroups(Name.Label, Whitespace, Operator)), include('function'), include('keyword'), include('operator'), (r'[a-z_][a-z0-9_]*', Text), ], 'function': [ (r'(abbrev|abs|address|arg|b2x|bitand|bitor|bitxor|c2d|c2x|' r'center|charin|charout|chars|compare|condition|copies|d2c|' r'd2x|datatype|date|delstr|delword|digits|errortext|form|' r'format|fuzz|insert|lastpos|left|length|linein|lineout|lines|' r'max|min|overlay|pos|queued|random|reverse|right|sign|' r'sourceline|space|stream|strip|substr|subword|symbol|time|' r'trace|translate|trunc|value|verify|word|wordindex|' r'wordlength|wordpos|words|x2b|x2c|x2d|xrange)(\s*)(\()', bygroups(Name.Builtin, Whitespace, Operator)), ], 'keyword': [ (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|' 
r'interpret|iterate|leave|nop|numeric|off|on|options|parse|' r'pull|push|queue|return|say|select|signal|to|then|trace|until|' r'while)\b', Keyword.Reserved), ], 'operator': [ (ur'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||' ur'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|' ur'¬>>|¬>|¬|\.|,)', Operator), ], 'string_double': [ (r'[^"\n]+', String), (r'""', String), (r'"', String, '#pop'), (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. ], 'string_single': [ (r'[^\'\n]', String), (r'\'\'', String), (r'\'', String, '#pop'), (r'\n', Text, '#pop'), # Stray linefeed also terminates strings. ], 'comment': [ (r'[^*]+', Comment.Multiline), (r'\*/', Comment.Multiline, '#pop'), (r'\*', Comment.Multiline), ] } _c = lambda s: re.compile(s, re.MULTILINE) _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b') _ADDRESS_PATTERN = _c(r'^\s*address\s+') _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b') _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$') _PROCEDURE_PATTERN = _c(r'^\s*([a-z_][a-z0-9_]*)(\s*)(:)(\s*)(procedure)\b') _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$') _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b') PATTERNS_AND_WEIGHTS = ( (_ADDRESS_COMMAND_PATTERN, 0.2), (_ADDRESS_PATTERN, 0.05), (_DO_WHILE_PATTERN, 0.1), (_ELSE_DO_PATTERN, 0.1), (_IF_THEN_DO_PATTERN, 0.1), (_PROCEDURE_PATTERN, 0.5), (_PARSE_ARG_PATTERN, 0.2), ) def analyse_text(text): """ Check for inital comment and patterns that distinguish Rexx from other C-like languages. """ if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE): # Header matches MVS Rexx requirements, this is certainly a Rexx # script. return 1.0 elif text.startswith('/*'): # Header matches general Rexx requirements; the source code might # still be any language using C comments such as C++, C# or Java. lowerText = text.lower() result = sum(weight for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS if pattern.search(lowerText)) + 0.01 return min(result, 1.0)
mit
SteveHNH/ansible
lib/ansible/modules/system/make.py
24
4295
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Linus Unnebäck <linus@folkdatorn.se> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: make short_description: Run targets in a Makefile requirements: [ make ] version_added: "2.1" author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se> description: - Run targets in a Makefile. options: target: description: - The target to run required: false default: none params: description: - Any extra parameters to pass to make required: false default: none chdir: description: - cd into this directory before running make required: true ''' EXAMPLES = ''' # Build the default target - make: chdir: /home/ubuntu/cool-project # Run `install` target as root - make: chdir: /home/ubuntu/cool-project target: install become: yes # Pass in extra arguments to build - make: chdir: /home/ubuntu/cool-project target: all params: NUM_THREADS: 4 BACKEND: lapack ''' # TODO: Disabled the RETURN as it was breaking docs building. Someone needs to # fix this RETURN = '''# ''' from ansible.module_utils.six import iteritems from ansible.module_utils.basic import AnsibleModule def run_command(command, module, check_rc=True): """ Run a command using the module, return the result code and std{err,out} content. :param command: list of command arguments :param module: Ansible make module instance :return: return code, stdout content, stderr content """ rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir']) return rc, sanitize_output(out), sanitize_output(err) def sanitize_output(output): """ Sanitize the output string before we pass it to module.fail_json. Defaults the string to empty if it is None, else strips trailing newlines. 
:param output: output to sanitize :return: sanitized output """ if output is None: return '' else: return output.rstrip("\r\n") def main(): module = AnsibleModule( supports_check_mode=True, argument_spec=dict( target=dict(required=False, default=None, type='str'), params=dict(required=False, default=None, type='dict'), chdir=dict(required=True, default=None, type='path'), ), ) # Build up the invocation of `make` we are going to use make_path = module.get_bin_path('make', True) make_target = module.params['target'] if module.params['params'] is not None: make_parameters = [k + '=' + str(v) for k, v in iteritems(module.params['params'])] else: make_parameters = [] base_command = [make_path, make_target] base_command.extend(make_parameters) # Check if the target is already up to date rc, out, err = run_command(base_command + ['--question'], module, check_rc=False) if module.check_mode: # If we've been asked to do a dry run, we only need # to report whether or not the target is up to date changed = (rc != 0) else: if rc == 0: # The target is up to date, so we don't have to # do anything changed = False else: # The target isn't upd to date, so we need to run it rc, out, err = run_command(base_command, module) changed = True # We don't report the return code, as if this module failed # we would be calling fail_json from run_command, so even if # we had a non-zero return code, we did not fail. However, if # we report a non-zero return code here, we will be marked as # failed regardless of what we signal using the failed= kwarg. module.exit_json( changed=changed, failed=False, stdout=out, stderr=err, target=module.params['target'], params=module.params['params'], chdir=module.params['chdir'] ) if __name__ == '__main__': main()
gpl-3.0
mydongistiny/external_chromium_org
tools/telemetry/telemetry/core/platform/tracing_category_filter.py
58
6027
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import re def CreateNoOverheadFilter(): """Returns a filter with the least overhead possible. This contains no sub-traces of thread tasks, so it's only useful for capturing the cpu-time spent on threads (as well as needed benchmark traces). FIXME: Remove webkit.console when blink.console lands in chromium and the ref builds are updated. crbug.com/386847 """ categories = [ "toplevel", "benchmark", "webkit.console", "blink.console", "trace_event_overhead" ] return TracingCategoryFilter(filter_string=','.join(categories)) def CreateMinimalOverheadFilter(): """Returns a filter with the best-effort amount of overhead.""" return TracingCategoryFilter(filter_string='') def CreateDebugOverheadFilter(): """Returns a filter with as many traces enabled as is useful.""" return TracingCategoryFilter(filter_string='*,disabled-by-default-cc.debug') _delay_re = re.compile(r'DELAY[(][A-Za-z0-9._;]+[)]') class TracingCategoryFilter(object): """A set of included and excluded categories that should be traced. The TraceCategoryFilter allows fine tuning of what data is traced. Basic choice of which tracers to use is done by TracingOptions. Providing filter_string=None gives the default category filter, which leaves what to trace up to the individual trace systems. """ def __init__(self, filter_string=None): self._included_categories = set() self._excluded_categories = set() self._disabled_by_default_categories = set() self._synthetic_delays = set() self.contains_wildcards = False if filter_string == None: return if '*' in filter_string or '?' 
in filter_string: self.contains_wildcards = True filter_set = set(filter_string.split(',')) for category in filter_set: if category == '': continue if _delay_re.match(category): self._synthetic_delays.add(category) continue if category[0] == '-': assert not category[1:] in self._included_categories self._excluded_categories.add(category[1:]) continue if category.startswith('disabled-by-default-'): self._disabled_by_default_categories.add(category) continue assert not category in self._excluded_categories self._included_categories.add(category) @property def included_categories(self): return self._included_categories @property def excluded_categories(self): return self._excluded_categories @property def disabled_by_default_categories(self): return self._disabled_by_default_categories @property def synthetic_delays(self): return self._synthetic_delays @property def filter_string(self): return self._GetFilterString(stable_output=False) @property def stable_filter_string(self): return self._GetFilterString(stable_output=True) def _GetFilterString(self, stable_output): # Note: This outputs fields in an order that intentionally matches # trace_event_impl's CategoryFilter string order. 
lists = [] lists.append(self._included_categories) lists.append(self._disabled_by_default_categories) lists.append(['-%s' % x for x in self._excluded_categories]) lists.append(self._synthetic_delays) categories = [] for l in lists: if stable_output: l = list(l) l.sort() categories.extend(l) return ','.join(categories) def AddIncludedCategory(self, category_glob): """Explicitly enables anything matching category_glob.""" assert not category_glob.startswith('disabled-by-default-') assert not category_glob in self._excluded_categories self._included_categories.add(category_glob) def AddExcludedCategory(self, category_glob): """Explicitly disables anything matching category_glob.""" assert not category_glob.startswith('disabled-by-default-') assert not category_glob in self._included_categories self._excluded_categories.add(category_glob) def AddSyntheticDelay(self, delay): assert _delay_re.match(delay) self._synthetic_delays.add(delay) def IsSubset(self, other): """ Determine if filter A (self) is a subset of filter B (other). Returns True if A is a subset of B, False if A is not a subset of B, and None if we can't tell for sure. """ # We don't handle filters with wildcards in this test. if self.contains_wildcards or other.contains_wildcards: return None # Disabled categories get into a trace if and only if they are contained in # the 'disabled' set. Return False if A's disabled set is not a subset of # B's disabled set. if not self.disabled_by_default_categories <= \ other.disabled_by_default_categories: return False # If A defines more or different synthetic delays than B, then A is not a # subset. if not self.synthetic_delays <= other.synthetic_delays: return False if self.included_categories and other.included_categories: # A and B have explicit include lists. If A includes something that B # doesn't, return False. if not self.included_categories <= other.included_categories: return False elif self.included_categories: # Only A has an explicit include list. 
If A includes something that B # excludes, return False. if self.included_categories.intersection(other.excluded_categories): return False elif other.included_categories: # Only B has an explicit include list. We don't know which categories are # contained in the default list, so return None. return None else: # None of the filter have explicit include list. If B excludes categories # that A doesn't exclude, return False. if not other.excluded_categories <= self.excluded_categories: return False return True
bsd-3-clause
rolandgeider/OpenSlides
openslides/core/serializers.py
1
1801
from openslides.utils.rest_api import Field, ModelSerializer, ValidationError from .models import ChatMessage, CustomSlide, Projector, Tag class JSONSerializerField(Field): """ Serializer for projector's JSONField. """ def to_internal_value(self, data): """ Checks that data is a dictionary. The key is a hex UUID and the value is a dictionary with must have a key 'name'. """ if type(data) is not dict: raise ValidationError({'detail': 'Data must be a dictionary.'}) for element in data.values(): if type(element) is not dict: raise ValidationError({'detail': 'Data must be a dictionary.'}) elif element.get('name') is None: raise ValidationError({'detail': "Every dictionary must have a key 'name'."}) return data class ProjectorSerializer(ModelSerializer): """ Serializer for core.models.Projector objects. """ config = JSONSerializerField(write_only=True) class Meta: model = Projector fields = ('id', 'config', 'elements', 'scale', 'scroll', ) class CustomSlideSerializer(ModelSerializer): """ Serializer for core.models.CustomSlide objects. """ class Meta: model = CustomSlide fields = ('id', 'title', 'text', 'weight', 'attachments', 'agenda_item_id') class TagSerializer(ModelSerializer): """ Serializer for core.models.Tag objects. """ class Meta: model = Tag fields = ('id', 'name', ) class ChatMessageSerializer(ModelSerializer): """ Serializer for core.models.ChatMessage objects. """ class Meta: model = ChatMessage fields = ('id', 'message', 'timestamp', 'user', ) read_only_fields = ('user', )
mit
RockySteveJobs/python-for-android
python3-alpha/python3-src/Lib/test/test_thread.py
50
7616
import os import unittest import random from test import support thread = support.import_module('_thread') import time import sys import weakref from test import lock_tests NUMTASKS = 10 NUMTRIPS = 3 _print_mutex = thread.allocate_lock() def verbose_print(arg): """Helper function for printing out debugging output.""" if support.verbose: with _print_mutex: print(arg) class BasicThreadTest(unittest.TestCase): def setUp(self): self.done_mutex = thread.allocate_lock() self.done_mutex.acquire() self.running_mutex = thread.allocate_lock() self.random_mutex = thread.allocate_lock() self.created = 0 self.running = 0 self.next_ident = 0 class ThreadRunningTests(BasicThreadTest): def newtask(self): with self.running_mutex: self.next_ident += 1 verbose_print("creating task %s" % self.next_ident) thread.start_new_thread(self.task, (self.next_ident,)) self.created += 1 self.running += 1 def task(self, ident): with self.random_mutex: delay = random.random() / 10000.0 verbose_print("task %s will run for %sus" % (ident, round(delay*1e6))) time.sleep(delay) verbose_print("task %s done" % ident) with self.running_mutex: self.running -= 1 if self.created == NUMTASKS and self.running == 0: self.done_mutex.release() def test_starting_threads(self): # Basic test for thread creation. for i in range(NUMTASKS): self.newtask() verbose_print("waiting for tasks to complete...") self.done_mutex.acquire() verbose_print("all tasks done") def test_stack_size(self): # Various stack size tests. 
self.assertEqual(thread.stack_size(), 0, "initial stack size is not 0") thread.stack_size(0) self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default") if os.name not in ("nt", "os2", "posix"): return tss_supported = True try: thread.stack_size(4096) except ValueError: verbose_print("caught expected ValueError setting " "stack_size(4096)") except thread.error: tss_supported = False verbose_print("platform does not support changing thread stack " "size") if tss_supported: fail_msg = "stack_size(%d) failed - should succeed" for tss in (262144, 0x100000, 0): thread.stack_size(tss) self.assertEqual(thread.stack_size(), tss, fail_msg % tss) verbose_print("successfully set stack_size(%d)" % tss) for tss in (262144, 0x100000): verbose_print("trying stack_size = (%d)" % tss) self.next_ident = 0 self.created = 0 for i in range(NUMTASKS): self.newtask() verbose_print("waiting for all tasks to complete") self.done_mutex.acquire() verbose_print("all tasks done") thread.stack_size(0) def test__count(self): # Test the _count() function. orig = thread._count() mut = thread.allocate_lock() mut.acquire() started = [] def task(): started.append(None) mut.acquire() mut.release() thread.start_new_thread(task, ()) while not started: time.sleep(0.01) self.assertEqual(thread._count(), orig + 1) # Allow the task to finish. mut.release() # The only reliable way to be sure that the thread ended from the # interpreter's point of view is to wait for the function object to be # destroyed. 
done = [] wr = weakref.ref(task, lambda _: done.append(None)) del task while not done: time.sleep(0.01) self.assertEqual(thread._count(), orig) class Barrier: def __init__(self, num_threads): self.num_threads = num_threads self.waiting = 0 self.checkin_mutex = thread.allocate_lock() self.checkout_mutex = thread.allocate_lock() self.checkout_mutex.acquire() def enter(self): self.checkin_mutex.acquire() self.waiting = self.waiting + 1 if self.waiting == self.num_threads: self.waiting = self.num_threads - 1 self.checkout_mutex.release() return self.checkin_mutex.release() self.checkout_mutex.acquire() self.waiting = self.waiting - 1 if self.waiting == 0: self.checkin_mutex.release() return self.checkout_mutex.release() class BarrierTest(BasicThreadTest): def test_barrier(self): self.bar = Barrier(NUMTASKS) self.running = NUMTASKS for i in range(NUMTASKS): thread.start_new_thread(self.task2, (i,)) verbose_print("waiting for tasks to end") self.done_mutex.acquire() verbose_print("tasks done") def task2(self, ident): for i in range(NUMTRIPS): if ident == 0: # give it a good chance to enter the next # barrier before the others are all out # of the current one delay = 0 else: with self.random_mutex: delay = random.random() / 10000.0 verbose_print("task %s will run for %sus" % (ident, round(delay * 1e6))) time.sleep(delay) verbose_print("task %s entering %s" % (ident, i)) self.bar.enter() verbose_print("task %s leaving barrier" % ident) with self.running_mutex: self.running -= 1 # Must release mutex before releasing done, else the main thread can # exit and set mutex to None as part of global teardown; then # mutex.release() raises AttributeError. 
finished = self.running == 0 if finished: self.done_mutex.release() class LockTests(lock_tests.LockTests): locktype = thread.allocate_lock class TestForkInThread(unittest.TestCase): def setUp(self): self.read_fd, self.write_fd = os.pipe() @unittest.skipIf(sys.platform.startswith('win'), "This test is only appropriate for POSIX-like systems.") @support.reap_threads def test_forkinthread(self): def thread1(): try: pid = os.fork() # fork in a thread except RuntimeError: os._exit(1) # exit the child if pid == 0: # child try: os.close(self.read_fd) os.write(self.write_fd, b"OK") finally: os._exit(0) else: # parent os.close(self.write_fd) thread.start_new_thread(thread1, ()) self.assertEqual(os.read(self.read_fd, 2), b"OK", "Unable to fork() in thread") def tearDown(self): try: os.close(self.read_fd) except OSError: pass try: os.close(self.write_fd) except OSError: pass def test_main(): support.run_unittest(ThreadRunningTests, BarrierTest, LockTests, TestForkInThread) if __name__ == "__main__": test_main()
apache-2.0
HIIT/elfi
elfi/methods/mcmc.py
1
16593
"""MCMC sampling methods.""" import logging import numpy as np logger = logging.getLogger(__name__) # TODO: combine ESS and Rhat?, consider transforming parameters to allowed # region to increase acceptance ratio def eff_sample_size(chains): """Calculate the effective sample size for 1 or more chains. See: Gelman, Carlin, Stern, Dunson, Vehtari, Rubin: Bayesian Data Analysis, 2013. Stan modeling language user's guide and reference manual, v. 2.14.0. Parameters ---------- chains : np.array of shape (N,) or (M, N) Samples of a parameter from an MCMC algorithm. No burn-in subtracted here! Returns ------- ess : float """ chains = np.atleast_2d(chains) n_chains, n_samples = chains.shape means = np.mean(chains, axis=1) variances = np.var(chains, ddof=1, axis=1) var_between = 0 if n_chains == 1 else n_samples * np.var(means, ddof=1) var_within = np.mean(variances) var_pooled = ((n_samples - 1.) * var_within + var_between) / n_samples # autocovariances for lags 1..n_samples # https://en.wikipedia.org/wiki/Autocorrelation#Estimation n_padded = int(2**np.ceil(1 + np.log2(n_samples))) freqs = np.fft.rfft(chains - means[:, None], n_padded) autocov = np.fft.irfft(np.abs(freqs)**2)[:, :n_samples].real autocov = autocov / np.arange(n_samples, 0, -1) estimator_sum = 0. lag = 1 while lag < n_samples: # estimate multi-chain autocorrelation using variogram temp = 1. - (var_within - np.mean(autocov[:, lag])) / var_pooled # only use the first non-negative autocorrelations to avoid noise if temp >= 0: estimator_sum += temp lag += 1 else: break ess = n_chains * n_samples / (1. + 2. * estimator_sum) return ess def gelman_rubin(chains): r"""Calculate the Gelman--Rubin convergence statistic. Also known as the potential scale reduction factor, or \hat{R}. Uses the split version, as in Stan. See: Gelman, Carlin, Stern, Dunson, Vehtari, Rubin: Bayesian Data Analysis, 2013. Gelman, A. and D. B. Rubin: Inference from iterative simulation using multiple sequences (with discussion). 
Statistical Science, 7:457-511, 1992. Stan modeling language user's guide and reference manual, v. 2.14.0. Parameters ---------- chains : np.array of shape (M, N) Samples of a parameter from an MCMC algorithm, 1 row per chain. No burn-in subtracted here! Returns ------- psrf : float Should be below 1.1 to support convergence, or at least below 1.2 for all parameters. """ chains = np.atleast_2d(chains) n_chains, n_samples = chains.shape # split chains in the middle n_chains *= 2 n_samples //= 2 # drop 1 if odd chains = chains[:, :2 * n_samples].reshape((n_chains, n_samples)) means = np.mean(chains, axis=1) variances = np.var(chains, ddof=1, axis=1) var_between = n_samples * np.var(means, ddof=1) var_within = np.mean(variances) var_pooled = ((n_samples - 1.) * var_within + var_between) / n_samples # potential scale reduction factor, should be close to 1 psrf = np.sqrt(var_pooled / var_within) return psrf def nuts(n_iter, params0, target, grad_target, n_adapt=None, target_prob=0.6, max_depth=5, seed=0, info_freq=100, max_retry_inits=20, stepsize=None): r"""Sample the target using the NUTS algorithm. No-U-Turn Sampler, an improved version of the Hamiltonian (Markov Chain) Monte Carlo sampler. Based on Algorithm 6 in Hoffman & Gelman, depthMLR 15, 1351-1381, 2014. Parameters ---------- n_iter : int The number of iterations, including n_adapt and possible other warmup iterations. params0 : np.array Initial values for sampled parameters. target : function The target's log density to sample (possibly unnormalized). grad_target : function The gradient of target. n_adapt : int, optional The number of automatic adjustments to stepsize. Defaults to n_iter/2. target_prob : float, optional Desired average acceptance probability. (Parameter \delta in the original paper.) max_depth : int, optional Maximum recursion depth. seed : int, optional Seed for pseudo-random number generator. info_freq : int, optional How often to log progress to loglevel INFO. 
max_retry_inits : int, optional How many times to retry finding initial stepsize (if stepped outside allowed region). stepsize : float, optional Initial stepsize (will be still adapted). Defaults to finding by trial and error. Returns ------- samples : np.array Samples from the MCMC algorithm, including those during adaptation. """ random_state = np.random.RandomState(seed) n_adapt = n_adapt if n_adapt is not None else n_iter // 2 logger.info("NUTS: Performing {} iterations with {} adaptation steps.".format(n_iter, n_adapt)) target0 = target(params0) if np.isinf(target0): raise ValueError("NUTS: Bad initialization point {}, logpdf -> -inf.".format(params0)) # ******************************** # Find reasonable initial stepsize # ******************************** if stepsize is None: grad0 = grad_target(params0) logger.debug("NUTS: Trying to find initial stepsize from point {} with gradient {}.". format(params0, grad0)) init_tries = 0 while init_tries < max_retry_inits: # might step into region unallowed by priors stepsize = np.exp(-init_tries) init_tries += 1 momentum0 = random_state.randn(*params0.shape) # leapfrog momentum1 = momentum0 + 0.5 * stepsize * grad0 params1 = params0 + stepsize * momentum1 momentum1 += 0.5 * stepsize * grad_target(params1) joint0 = target0 - 0.5 * momentum0.dot(momentum0) joint1 = target(params1) - 0.5 * momentum1.dot(momentum1) if np.isfinite(joint1): break else: if init_tries == max_retry_inits: raise ValueError( "NUTS: Cannot find acceptable stepsize starting from point {}. All " "trials ended in region with 0 probability.".format(params0)) # logger.debug("momentum0 {}, momentum1 {}, params1 {}, joint0 {}, joint1 {}" # .format(momentum0, momentum1, params1, joint0, joint1)) logger.debug("NUTS: Problem finding acceptable stepsize, now {}. Retrying {}/{}." .format(stepsize, init_tries, max_retry_inits)) plusminus = 1 if np.exp(joint1 - joint0) > 0.5 else -1 factor = 2. 
if plusminus == 1 else 0.5 while factor * np.exp(plusminus * (joint1 - joint0)) > 1.: stepsize *= factor if stepsize == 0. or stepsize > 1e7: # bounds as in STAN raise SystemExit("NUTS: Found invalid stepsize {} starting from point {}." .format(stepsize, params0)) # leapfrog momentum1 = momentum0 + 0.5 * stepsize * grad0 params1 = params0 + stepsize * momentum1 momentum1 += 0.5 * stepsize * grad_target(params1) joint1 = target(params1) - 0.5 * momentum1.dot(momentum1) logger.debug("NUTS: Set initial stepsize {}.".format(stepsize)) # Some parameters from the NUTS paper, used for adapting the stepsize target_stepsize = np.log(10. * stepsize) log_avg_stepsize = 0. accept_ratio = 0. # tends to target_prob shrinkage = 0.05 # controls shrinkage accept_ratio to target_prob ii_offset = 10. # stabilizes initialization discount = -0.75 # reduce weight of past # ******** # Sampling # ******** samples = np.empty((n_iter + 1, ) + params0.shape) samples[0, :] = params0 n_diverged = 0 # counter for proposals whose error diverged n_outside = 0 # counter for proposals outside priors (pdf=0) n_total = 0 # total number of proposals for ii in range(1, n_iter + 1): momentum0 = random_state.randn(*params0.shape) samples_prev = samples[ii - 1, :] log_joint0 = target(samples_prev) - 0.5 * momentum0.dot(momentum0) log_slicevar = log_joint0 - random_state.exponential() samples[ii, :] = samples_prev params_left = samples_prev params_right = samples_prev momentum_left = momentum0 momentum_right = momentum0 depth = 0 n_ok = 1 all_ok = True # criteria for no U-turn, diverging error while all_ok and depth <= max_depth: direction = 1 if random_state.rand() < 0.5 else -1 if direction == -1: params_left, momentum_left, _, _, params1, n_sub, sub_ok, mh_ratio, n_steps, \ is_div, is_out = _build_tree_nuts( params_left, momentum_left, log_slicevar, -stepsize, depth, log_joint0, target, grad_target, random_state) else: _, _, params_right, momentum_right, params1, n_sub, sub_ok, mh_ratio, n_steps, \ 
is_div, is_out = _build_tree_nuts( params_right, momentum_right, log_slicevar, stepsize, depth, log_joint0, target, grad_target, random_state) if sub_ok == 1: if random_state.rand() < float(n_sub) / n_ok: samples[ii, :] = params1 # accept proposal n_ok += n_sub if not is_out: # params1 outside allowed region; don't count this as diverging error n_diverged += is_div n_outside += is_out n_total += n_steps all_ok = sub_ok and ((params_right - params_left).dot(momentum_left) >= 0) \ and ((params_right - params_left).dot(momentum_right) >= 0) depth += 1 if depth > max_depth: logger.debug("NUTS: Maximum recursion depth {} exceeded.".format(max_depth)) # adjust stepsize according to target acceptance ratio if ii <= n_adapt: accept_ratio = (1. - 1. / (ii + ii_offset)) * accept_ratio \ + (target_prob - float(mh_ratio) / n_steps) / (ii + ii_offset) log_stepsize = target_stepsize - np.sqrt(ii) / shrinkage * accept_ratio log_avg_stepsize = ii ** discount * log_stepsize + \ (1. - ii ** discount) * log_avg_stepsize stepsize = np.exp(log_stepsize) elif ii == n_adapt + 1: # adaptation/warmup finished stepsize = np.exp(log_avg_stepsize) # final stepsize n_diverged = 0 n_outside = 0 n_total = 0 logger.info("NUTS: Adaptation/warmup finished. Sampling...") logger.debug("NUTS: Set final stepsize {}.".format(stepsize)) if ii % info_freq == 0 and ii < n_iter: logger.info("NUTS: Iterations performed: {}/{}...".format(ii, n_iter)) info_str = "NUTS: Acceptance ratio: {:.3f}".format(float(n_iter - n_adapt) / n_total) if n_outside > 0: info_str += ". After warmup {} proposals were outside of the region allowed by priors " \ "and rejected, decreasing acceptance ratio.".format(n_outside) logger.info(info_str) if n_diverged > 0: logger.warning("NUTS: Diverged proposals after warmup (i.e. 
n_adapt={} steps): {}".format( n_adapt, n_diverged)) return samples[1:, :] def _build_tree_nuts(params, momentum, log_slicevar, step, depth, log_joint0, target, grad_target, random_state): """Recursively build a balanced binary tree needed by NUTS. Based on Algorithm 6 in Hoffman & Gelman, JMLR 15, 1351-1381, 2014. """ # Base case: one leapfrog step if depth == 0: momentum1 = momentum + 0.5 * step * grad_target(params) params1 = params + step * momentum1 momentum1 = momentum1 + 0.5 * step * grad_target(params1) log_joint = target(params1) - 0.5 * momentum1.dot(momentum1) n_ok = float(log_slicevar <= log_joint) sub_ok = log_slicevar < (1000. + log_joint) # check for diverging error is_out = False if not sub_ok: if np.isinf(target(params1)): # logpdf(params1) = -inf i.e. pdf(params1) = 0 is_out = True else: logger.debug( "NUTS: Diverging error: log_joint={}, params={}, params1={}, momentum={}, " "momentum1={}.".format(log_joint, params, params1, momentum, momentum1)) mh_ratio = 0. # reject else: mh_ratio = min(1., np.exp(log_joint - log_joint0)) return params1, momentum1, params1, momentum1, params1, n_ok, sub_ok, mh_ratio, 1., \ not sub_ok, is_out else: # Recursion to build subtrees, doubling size params_left, momentum_left, params_right, momentum_right, params1, n_sub, sub_ok, \ mh_ratio, n_steps, is_div, is_out = _build_tree_nuts( params, momentum, log_slicevar, step, depth - 1, log_joint0, target, grad_target, random_state) if sub_ok: # recurse further if step < 0: params_left, momentum_left, _, _, params2, n_sub2, sub_ok, mh_ratio2, n_steps2, \ is_div, is_out = _build_tree_nuts( params_left, momentum_left, log_slicevar, step, depth - 1, log_joint0, target, grad_target, random_state) else: _, _, params_right, momentum_right, params2, n_sub2, sub_ok, mh_ratio2, n_steps2, \ is_div, is_out = _build_tree_nuts( params_right, momentum_right, log_slicevar, step, depth - 1, log_joint0, target, grad_target, random_state) if n_sub2 > 0: if float(n_sub2) / (n_sub + n_sub2) 
> random_state.rand(): params1 = params2 # accept move mh_ratio += mh_ratio2 n_steps += n_steps2 sub_ok = sub_ok and ((params_right - params_left).dot(momentum_left) >= 0) \ and ((params_right - params_left).dot(momentum_right) >= 0) n_sub += n_sub2 return params_left, momentum_left, params_right, momentum_right, params1, n_sub, sub_ok, \ mh_ratio, n_steps, is_div, is_out def metropolis(n_samples, params0, target, sigma_proposals, warmup=0, seed=0): """Sample the target with a Metropolis Markov Chain Monte Carlo using Gaussian proposals. Parameters ---------- n_samples : int The number of requested samples. params0 : np.array Initial values for each sampled parameter. target : function The target log density to sample (possibly unnormalized). sigma_proposals : np.array Standard deviations for Gaussian proposals of each parameter. warmup : int Number of warmup samples. seed : int, optional Seed for pseudo-random number generator. Returns ------- samples : np.array """ random_state = np.random.RandomState(seed) samples = np.empty((n_samples + warmup + 1, ) + params0.shape) samples[0, :] = params0 target_current = target(params0) if np.isinf(target_current): raise ValueError( "Metropolis: Bad initialization point {},logpdf -> -inf.".format(params0)) n_accepted = 0 for ii in range(1, n_samples + warmup + 1): samples[ii, :] = samples[ii - 1, :] + sigma_proposals * random_state.randn(*params0.shape) target_prev = target_current target_current = target(samples[ii, :]) if ((np.exp(target_current - target_prev) < random_state.rand()) or np.isinf(target_current) or np.isnan(target_current)): # reject proposal samples[ii, :] = samples[ii - 1, :] target_current = target_prev else: n_accepted += 1 logger.info( "{}: Total acceptance ratio: {:.3f}".format(__name__, float(n_accepted) / (n_samples + warmup))) return samples[(1 + warmup):, :]
bsd-3-clause
afloren/nipype
nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py
9
1642
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from nipype.testing import assert_equal from nipype.interfaces.slicer.registration.brainsresample import BRAINSResample def test_BRAINSResample_inputs(): input_map = dict(args=dict(argstr='%s', ), defaultValue=dict(argstr='--defaultValue %f', ), deformationVolume=dict(argstr='--deformationVolume %s', ), environ=dict(nohash=True, usedefault=True, ), gridSpacing=dict(argstr='--gridSpacing %s', sep=',', ), ignore_exception=dict(nohash=True, usedefault=True, ), inputVolume=dict(argstr='--inputVolume %s', ), interpolationMode=dict(argstr='--interpolationMode %s', ), inverseTransform=dict(argstr='--inverseTransform ', ), numberOfThreads=dict(argstr='--numberOfThreads %d', ), outputVolume=dict(argstr='--outputVolume %s', hash_files=False, ), pixelType=dict(argstr='--pixelType %s', ), referenceVolume=dict(argstr='--referenceVolume %s', ), terminal_output=dict(nohash=True, ), warpTransform=dict(argstr='--warpTransform %s', ), ) inputs = BRAINSResample.input_spec() for key, metadata in input_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_BRAINSResample_outputs(): output_map = dict(outputVolume=dict(), ) outputs = BRAINSResample.output_spec() for key, metadata in output_map.items(): for metakey, value in metadata.items(): yield assert_equal, getattr(outputs.traits()[key], metakey), value
bsd-3-clause
RobertWWong/WebDev
djangoApp/ENV/lib/python3.5/site-packages/django/contrib/gis/db/backends/spatialite/introspection.py
59
3254
from django.contrib.gis.gdal import OGRGeomType from django.db.backends.sqlite3.introspection import ( DatabaseIntrospection, FlexibleFieldLookupDict, ) from django.utils import six class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict): """ Sublcass that includes updates the `base_data_types_reverse` dict for geometry field types. """ base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy() base_data_types_reverse.update( {'point': 'GeometryField', 'linestring': 'GeometryField', 'polygon': 'GeometryField', 'multipoint': 'GeometryField', 'multilinestring': 'GeometryField', 'multipolygon': 'GeometryField', 'geometrycollection': 'GeometryField', }) class SpatiaLiteIntrospection(DatabaseIntrospection): data_types_reverse = GeoFlexibleFieldLookupDict() def get_geometry_type(self, table_name, geo_col): cursor = self.connection.cursor() try: # Querying the `geometry_columns` table to get additional metadata. cursor.execute('SELECT coord_dimension, srid, geometry_type ' 'FROM geometry_columns ' 'WHERE f_table_name=%s AND f_geometry_column=%s', (table_name, geo_col)) row = cursor.fetchone() if not row: raise Exception('Could not find a geometry column for "%s"."%s"' % (table_name, geo_col)) # OGRGeomType does not require GDAL and makes it easy to convert # from OGC geom type name to Django field. ogr_type = row[2] if isinstance(ogr_type, six.integer_types) and ogr_type > 1000: # SpatiaLite versions >= 4 use the new SFSQL 1.2 offsets # 1000 (Z), 2000 (M), and 3000 (ZM) to indicate the presence of # higher dimensional coordinates (M not yet supported by Django). ogr_type = ogr_type % 1000 + OGRGeomType.wkb25bit field_type = OGRGeomType(ogr_type).django # Getting any GeometryField keyword arguments that are not the default. 
dim = row[0] srid = row[1] field_params = {} if srid != 4326: field_params['srid'] = srid if (isinstance(dim, six.string_types) and 'Z' in dim) or dim == 3: field_params['dim'] = 3 finally: cursor.close() return field_type, field_params def get_constraints(self, cursor, table_name): constraints = super(SpatiaLiteIntrospection, self).get_constraints(cursor, table_name) cursor.execute('SELECT f_geometry_column ' 'FROM geometry_columns ' 'WHERE f_table_name=%s AND spatial_index_enabled=1', (table_name,)) for row in cursor.fetchall(): constraints['%s__spatial__index' % row[0]] = { "columns": [row[0]], "primary_key": False, "unique": False, "foreign_key": None, "check": False, "index": True, } return constraints
mit
fangxingli/hue
desktop/core/ext-py/Paste-2.0.1/paste/exceptions/errormiddleware.py
50
17105
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php """ Error handler middleware """ import sys import traceback import cgi from six.moves import cStringIO as StringIO from paste.exceptions import formatter, collector, reporter from paste import wsgilib from paste import request import six __all__ = ['ErrorMiddleware', 'handle_exception'] class _NoDefault(object): def __repr__(self): return '<NoDefault>' NoDefault = _NoDefault() class ErrorMiddleware(object): """ Error handling middleware Usage:: error_catching_wsgi_app = ErrorMiddleware(wsgi_app) Settings: ``debug``: If true, then tracebacks will be shown in the browser. ``error_email``: an email address (or list of addresses) to send exception reports to ``error_log``: a filename to append tracebacks to ``show_exceptions_in_wsgi_errors``: If true, then errors will be printed to ``wsgi.errors`` (frequently a server error log, or stderr). ``from_address``, ``smtp_server``, ``error_subject_prefix``, ``smtp_username``, ``smtp_password``, ``smtp_use_tls``: variables to control the emailed exception reports ``error_message``: When debug mode is off, the error message to show to users. ``xmlhttp_key``: When this key (default ``_``) is in the request GET variables (not POST!), expect that this is an XMLHttpRequest, and the response should be more minimal; it should not be a complete HTML page. Environment Configuration: ``paste.throw_errors``: If this setting in the request environment is true, then this middleware is disabled. This can be useful in a testing situation where you don't want errors to be caught and transformed. ``paste.expected_exceptions``: When this middleware encounters an exception listed in this environment variable and when the ``start_response`` has not yet occurred, the exception will be re-raised instead of being caught. 
This should generally be set by middleware that may (but probably shouldn't be) installed above this middleware, and wants to get certain exceptions. Exceptions raised after ``start_response`` have been called are always caught since by definition they are no longer expected. """ def __init__(self, application, global_conf=None, debug=NoDefault, error_email=None, error_log=None, show_exceptions_in_wsgi_errors=NoDefault, from_address=None, smtp_server=None, smtp_username=None, smtp_password=None, smtp_use_tls=False, error_subject_prefix=None, error_message=None, xmlhttp_key=None): from paste.util import converters self.application = application # @@: global_conf should be handled elsewhere in a separate # function for the entry point if global_conf is None: global_conf = {} if debug is NoDefault: debug = converters.asbool(global_conf.get('debug')) if show_exceptions_in_wsgi_errors is NoDefault: show_exceptions_in_wsgi_errors = converters.asbool(global_conf.get('show_exceptions_in_wsgi_errors')) self.debug_mode = converters.asbool(debug) if error_email is None: error_email = (global_conf.get('error_email') or global_conf.get('admin_email') or global_conf.get('webmaster_email') or global_conf.get('sysadmin_email')) self.error_email = converters.aslist(error_email) self.error_log = error_log self.show_exceptions_in_wsgi_errors = show_exceptions_in_wsgi_errors if from_address is None: from_address = global_conf.get('error_from_address', 'errors@localhost') self.from_address = from_address if smtp_server is None: smtp_server = global_conf.get('smtp_server', 'localhost') self.smtp_server = smtp_server self.smtp_username = smtp_username or global_conf.get('smtp_username') self.smtp_password = smtp_password or global_conf.get('smtp_password') self.smtp_use_tls = smtp_use_tls or converters.asbool(global_conf.get('smtp_use_tls')) self.error_subject_prefix = error_subject_prefix or '' if error_message is None: error_message = global_conf.get('error_message') self.error_message 
= error_message if xmlhttp_key is None: xmlhttp_key = global_conf.get('xmlhttp_key', '_') self.xmlhttp_key = xmlhttp_key def __call__(self, environ, start_response): """ The WSGI application interface. """ # We want to be careful about not sending headers twice, # and the content type that the app has committed to (if there # is an exception in the iterator body of the response) if environ.get('paste.throw_errors'): return self.application(environ, start_response) environ['paste.throw_errors'] = True try: __traceback_supplement__ = Supplement, self, environ sr_checker = ResponseStartChecker(start_response) app_iter = self.application(environ, sr_checker) return self.make_catching_iter(app_iter, environ, sr_checker) except: exc_info = sys.exc_info() try: for expect in environ.get('paste.expected_exceptions', []): if isinstance(exc_info[1], expect): raise start_response('500 Internal Server Error', [('content-type', 'text/html')], exc_info) # @@: it would be nice to deal with bad content types here response = self.exception_handler(exc_info, environ) if six.PY3: response = response.encode('utf8') return [response] finally: # clean up locals... 
# NOTE(review): this chunk begins inside ErrorMiddleware.__call__ — the
# statement below clears the except-clause local (breaks traceback
# reference cycles). Indentation reconstructed from context; confirm
# against the full file.
                exc_info = None

    def make_catching_iter(self, app_iter, environ, sr_checker):
        # Plain lists/tuples cannot raise during iteration, so they need
        # no error-catching wrapper.
        if isinstance(app_iter, (list, tuple)):
            # These don't raise
            return app_iter
        return CatchingIter(app_iter, environ, sr_checker, self)

    def exception_handler(self, exc_info, environ):
        # Format and/or report *exc_info*; returns the error-page body
        # (or None when html=False inside handle_exception).
        simple_html_error = False
        if self.xmlhttp_key:
            # XMLHttpRequest clients get a stripped-down error page.
            get_vars = request.parse_querystring(environ)
            if dict(get_vars).get(self.xmlhttp_key):
                simple_html_error = True
        return handle_exception(
            exc_info, environ['wsgi.errors'],
            html=True,
            debug_mode=self.debug_mode,
            error_email=self.error_email,
            error_log=self.error_log,
            show_exceptions_in_wsgi_errors=self.show_exceptions_in_wsgi_errors,
            error_email_from=self.from_address,
            smtp_server=self.smtp_server,
            smtp_username=self.smtp_username,
            smtp_password=self.smtp_password,
            smtp_use_tls=self.smtp_use_tls,
            error_subject_prefix=self.error_subject_prefix,
            error_message=self.error_message,
            simple_html_error=simple_html_error)


class ResponseStartChecker(object):
    # Wraps start_response so CatchingIter can tell whether the headers
    # were already sent before an error occurred mid-iteration.

    def __init__(self, start_response):
        self.start_response = start_response
        self.response_started = False

    def __call__(self, *args):
        self.response_started = True
        self.start_response(*args)


class CatchingIter(object):

    """
    A wrapper around the application iterator that will catch
    exceptions raised by the generator, or by the close method, and
    display or report as necessary.
    """

    def __init__(self, app_iter, environ, start_checker, error_middleware):
        self.app_iterable = app_iter
        self.app_iterator = iter(app_iter)
        self.environ = environ
        self.start_checker = start_checker
        self.error_middleware = error_middleware
        self.closed = False

    def __iter__(self):
        return self

    def next(self):
        __traceback_supplement__ = (
            Supplement, self.error_middleware, self.environ)
        if self.closed:
            raise StopIteration
        try:
            return self.app_iterator.next()
        except StopIteration:
            self.closed = True
            close_response = self._close()
            if close_response is not None:
                # .close() itself failed: emit its error report as the
                # final chunk instead of ending the iteration silently.
                return close_response
            else:
                raise StopIteration
        except:
            self.closed = True
            close_response = self._close()
            exc_info = sys.exc_info()
            response = self.error_middleware.exception_handler(
                exc_info, self.environ)
            if close_response is not None:
                response += (
                    '<hr noshade>Error in .close():<br>%s'
                    % close_response)

            if not self.start_checker.response_started:
                # Headers not sent yet — we can still turn this into a
                # proper 500 response.
                self.start_checker('500 Internal Server Error',
                                   [('content-type', 'text/html')],
                                   exc_info)

            if six.PY3:
                response = response.encode('utf8')
            return response

    __next__ = next

    def close(self):
        # This should at least print something to stderr if the
        # close method fails at this point
        if not self.closed:
            self._close()

    def _close(self):
        """Close and return any error message"""
        if not hasattr(self.app_iterable, 'close'):
            return None
        try:
            self.app_iterable.close()
            return None
        except:
            close_response = self.error_middleware.exception_handler(
                sys.exc_info(), self.environ)
            return close_response


class Supplement(object):

    """
    This is a supplement used to display standard WSGI information in
    the traceback.
    """

    def __init__(self, middleware, environ):
        self.middleware = middleware
        self.environ = environ
        self.source_url = request.construct_url(environ)

    def extraData(self):
        # Split environ into CGI-style variables (ALL-CAPS keys) and
        # WSGI variables, hiding the noisy standard WSGI keys.
        data = {}
        cgi_vars = data[('extra', 'CGI Variables')] = {}
        wsgi_vars = data[('extra', 'WSGI Variables')] = {}
        hide_vars = ['paste.config', 'wsgi.errors', 'wsgi.input',
                     'wsgi.multithread', 'wsgi.multiprocess',
                     'wsgi.run_once', 'wsgi.version',
                     'wsgi.url_scheme']
        for name, value in self.environ.items():
            if name.upper() == name:
                if value:
                    cgi_vars[name] = value
            elif name not in hide_vars:
                wsgi_vars[name] = value
        if self.environ['wsgi.version'] != (1, 0):
            # Only surface wsgi.version when it is non-standard.
            wsgi_vars['wsgi.version'] = self.environ['wsgi.version']
        proc_desc = tuple([int(bool(self.environ[key]))
                           for key in ('wsgi.multiprocess',
                                       'wsgi.multithread',
                                       'wsgi.run_once')])
        wsgi_vars['wsgi process'] = self.process_combos[proc_desc]
        wsgi_vars['application'] = self.middleware.application
        if 'paste.config' in self.environ:
            data[('extra', 'Configuration')] = dict(self.environ['paste.config'])
        return data

    process_combos = {
        # multiprocess, multithread, run_once
        (0, 0, 0): 'Non-concurrent server',
        (0, 1, 0): 'Multithreaded',
        (1, 0, 0): 'Multiprocess',
        (1, 1, 0): 'Multi process AND threads (?)',
        (0, 0, 1): 'Non-concurrent CGI',
        (0, 1, 1): 'Multithread CGI (?)',
        (1, 0, 1): 'CGI',
        (1, 1, 1): 'Multi thread/process CGI (?)',
        }


def handle_exception(exc_info, error_stream, html=True,
                     debug_mode=False,
                     error_email=None,
                     error_log=None,
                     show_exceptions_in_wsgi_errors=False,
                     error_email_from='errors@localhost',
                     smtp_server='localhost',
                     smtp_username=None,
                     smtp_password=None,
                     smtp_use_tls=False,
                     error_subject_prefix='',
                     error_message=None,
                     simple_html_error=False,
                     ):
    """
    For exception handling outside of a web context

    Use like::

        import sys
        from paste.exceptions.errormiddleware import handle_exception
        try:
            do stuff
        except:
            handle_exception(
                sys.exc_info(), sys.stderr, html=False, ...other config...)

    If you want to report, but not fully catch the exception, call
    ``raise`` after ``handle_exception``, which (when given no argument)
    will reraise the exception.
    """
    # `reported` tracks whether at least one reporter succeeded; if none
    # did, a plain-text traceback is written to error_stream at the end
    # as a last resort.
    reported = False
    exc_data = collector.collect_exception(*exc_info)
    extra_data = ''
    if error_email:
        rep = reporter.EmailReporter(
            to_addresses=error_email,
            from_address=error_email_from,
            smtp_server=smtp_server,
            smtp_username=smtp_username,
            smtp_password=smtp_password,
            smtp_use_tls=smtp_use_tls,
            subject_prefix=error_subject_prefix)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    if error_log:
        rep = reporter.LogReporter(
            filename=error_log)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    if show_exceptions_in_wsgi_errors:
        rep = reporter.FileReporter(
            file=error_stream)
        rep_err = send_report(rep, exc_data, html=html)
        if rep_err:
            extra_data += rep_err
        else:
            reported = True
    else:
        # Even when not showing full tracebacks, log a one-line summary.
        line = ('Error - %s: %s\n'
                % (exc_data.exception_type, exc_data.exception_value))
        if six.PY3:
            line = line.encode('utf8')
        error_stream.write(line)
    if html:
        if debug_mode and simple_html_error:
            return_error = formatter.format_html(
                exc_data, include_hidden_frames=False,
                include_reusable=False, show_extra_data=False)
            reported = True
        elif debug_mode and not simple_html_error:
            error_html = formatter.format_html(
                exc_data,
                include_hidden_frames=True,
                include_reusable=False)
            head_html = formatter.error_css + formatter.hide_display_js
            return_error = error_template(
                head_html, error_html, extra_data)
            extra_data = ''
            reported = True
        else:
            msg = error_message or '''
            An error occurred. See the error logs for more information.
            (Turn debug on to display exception reports here)
            '''
            return_error = error_template('', msg, '')
    else:
        return_error = None
    if not reported and error_stream:
        err_report = formatter.format_text(exc_data, show_hidden_frames=True)
        err_report += '\n' + '-'*60 + '\n'
        error_stream.write(err_report)
        if extra_data:
            error_stream.write(extra_data)
    return return_error


def send_report(rep, exc_data, html=True):
    # Run one reporter; on failure return a description of the failure
    # (HTML or plain text) instead of raising, so other reporters and
    # the response itself still proceed.
    try:
        rep.report(exc_data)
    except:
        output = StringIO()
        traceback.print_exc(file=output)
        if html:
            return """
            <p>Additionally an error occurred while sending the %s report:

            <pre>%s</pre>
            </p>""" % (
                cgi.escape(str(rep)), output.getvalue())
        else:
            return (
                "Additionally an error occurred while sending the "
                "%s report:\n%s" % (str(rep), output.getvalue()))
    else:
        return ''


def error_template(head_html, exception, extra):
    # Minimal standalone HTML shell for the error page.
    return '''
    <html>
    <head>
    <title>Server Error</title>
    %s
    </head>
    <body>
    <h1>Server Error</h1>
    %s
    %s
    </body>
    </html>''' % (head_html, exception, extra)


def make_error_middleware(app, global_conf, **kw):
    return ErrorMiddleware(app, global_conf=global_conf, **kw)

# Reuse the "Settings" section of the class docstring as the factory's
# docstring, so paste.deploy help shows the configuration options.
doc_lines = ErrorMiddleware.__doc__.splitlines(True)
for i in range(len(doc_lines)):
    if doc_lines[i].strip().startswith('Settings'):
        make_error_middleware.__doc__ = ''.join(doc_lines[i:])
        break
del i, doc_lines
apache-2.0
GoogleCloudPlatform/practical-ml-vision-book
07_training/serverlessml/flowers/classifier/model.py
1
4420
#!/usr/bin/env python
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
# or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under
# the License.

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import os, shutil
from tensorflow.data.experimental import AUTOTUNE

os.environ['TFHUB_MODEL_LOAD_FORMAT'] = 'COMPRESSED'

from flowers.utils.augment import *
from flowers.utils.util import cleanup_dir
from flowers.ingest.tfrecords import create_preproc_image

CLASS_NAMES = 'daisy dandelion roses sunflowers tulips'.split()
MODEL_IMG_SIZE = 224  # What mobilenet expects


def create_model(opts, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
    """Build the flower classifier as a Keras Sequential model.

    The model is: in-graph augmentation (random crop to the MobileNet
    input size, random horizontal flip, optionally random color
    distortion) -> frozen MobileNet v2 feature vector from TF-Hub ->
    dense hidden layer -> softmax over CLASS_NAMES.

    Args:
        opts: dict of hyperparameters; reads 'l1', 'l2', 'num_hidden'
            (falsy values fall back to defaults) and
            'with_color_distort' (bool).
        IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS: shape of the incoming
            (pre-augmentation) images.

    Returns:
        An uncompiled tf.keras.Sequential model.
    """
    # L1/L2 regularization applied to both dense layers; opts values of
    # None/0 fall back to 0 (no regularization).
    regularizer = tf.keras.regularizers.l1_l2(opts['l1'] or 0, opts['l2'] or 0)

    layers = [
        tf.keras.layers.experimental.preprocessing.RandomCrop(
            height=MODEL_IMG_SIZE, width=MODEL_IMG_SIZE,
            input_shape=(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS),
            name='random/center_crop'
        ),
        tf.keras.layers.experimental.preprocessing.RandomFlip(
            mode='horizontal',
            name='random_lr_flip/none'
        )
    ]
    if opts['with_color_distort']:
        # Custom augmentation layer from flowers.utils.augment.
        layers.append(
            RandomColorDistortion(name='random_contrast_brightness/none')
        )
    layers += [
        # Frozen (trainable=False) MobileNet v2 embedding from TF-Hub.
        hub.KerasLayer(
            "https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4",
            trainable=False,
            name='mobilenet_embedding'),
        tf.keras.layers.Dense(opts['num_hidden'] or 16,
                              kernel_regularizer=regularizer,
                              activation=tf.keras.activations.relu,
                              name='dense_hidden'),
        tf.keras.layers.Dense(len(CLASS_NAMES),
                              kernel_regularizer=regularizer,
                              activation='softmax',
                              name='flower_prob')
    ]

    # Bug fix: the previous version constructed ModelCheckpoint and
    # EarlyStopping callback objects here that were never used, returned
    # or passed to fit() -- dead code that misled readers into thinking
    # checkpointing/early stopping was active. Callbacks belong at the
    # fit() call site.
    return tf.keras.Sequential(layers, name='flower_classification')


def export_model(model, outdir, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS):
    """Export *model* as a SavedModel whose serving signature accepts filenames.

    The exported 'serving_default' signature takes a 1-D string tensor of
    image filenames, decodes/preprocesses each image to the training
    size, and returns the top probability plus the predicted label as
    both an int index and a string.

    Args:
        model: trained Keras model from create_model().
        outdir: directory under which 'flowers_model' is (re)created.
        IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS: preprocessing target shape;
            must match what the model was trained with.
    """
    def create_preproc_image_of_right_size(filename):
        return create_preproc_image(filename, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)

    @tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)])
    def predict_flower_type(filenames):
        # Decode each filename into a preprocessed image tensor.
        input_images = tf.map_fn(
            create_preproc_image_of_right_size,
            filenames,
            fn_output_signature=tf.float32
        )
        batch_pred = model(input_images)  # same as model.predict()
        top_prob = tf.math.reduce_max(batch_pred, axis=[1])
        pred_label_index = tf.math.argmax(batch_pred, axis=1)
        pred_label = tf.gather(tf.convert_to_tensor(CLASS_NAMES),
                               pred_label_index)
        return {
            'probability': top_prob,
            'flower_type_int': pred_label_index,
            'flower_type_str': pred_label
        }

    outpath = os.path.join(outdir, 'flowers_model')
    cleanup_dir(outpath)  # remove any previous export so save() starts clean
    model.save(outpath,
               signatures={
                   'serving_default': predict_flower_type
               })

# ## License
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
# or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under
# the License.
apache-2.0
jamescallmebrent/dagny
example/users/resources.py
1
2201
# -*- coding: utf-8 -*-

"""RESTful ``User`` resource exposing Django's auth users via dagny.

Provides the standard index/new/create/show/edit/update/destroy actions,
with JSON renderers for the ``index`` and ``show`` collections.
"""

from dagny import Resource, action
from django.contrib.auth import forms, models
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect

# Robustness fix: fall back to the stdlib ``json`` module when the
# (long-unmaintained) ``simplejson`` package is absent -- the two are
# API-compatible for the ``dumps`` call used below.
try:
    import simplejson
except ImportError:
    import json as simplejson


class User(Resource):

    template_path_prefix = 'auth/'

    @action
    def index(self):
        """GET /users -- list all users."""
        self.users = models.User.objects.all()

    @index.render.json
    def index(self):
        # JSON variant of the index action.
        return json_response([user_to_dict(user) for user in self.users])

    @action
    def new(self):
        """GET /users/new -- blank creation form."""
        self.form = forms.UserCreationForm()

    @action
    def create(self):
        """POST /users -- create a user; redirect to it on success."""
        self.form = forms.UserCreationForm(self.request.POST)
        if self.form.is_valid():
            self.user = self.form.save()
            return redirect('User#show', str(self.user.id))

        # Re-render the creation form with validation errors.
        response = self.new.render()
        response.status_code = 403
        return response

    @action
    def show(self, user_id):
        """GET /users/<id> -- display a single user."""
        self.user = get_object_or_404(models.User, id=int(user_id))

    @show.render.json
    def show(self):
        # JSON variant of the show action.
        return json_response(user_to_dict(self.user))

    @action
    def edit(self, user_id):
        """GET /users/<id>/edit -- edit form bound to the user."""
        self.user = get_object_or_404(models.User, id=int(user_id))
        self.form = forms.UserChangeForm(instance=self.user)

    @action
    def update(self, user_id):
        """POST /users/<id> -- apply changes; redirect on success."""
        self.user = get_object_or_404(models.User, id=int(user_id))
        self.form = forms.UserChangeForm(self.request.POST, instance=self.user)
        if self.form.is_valid():
            self.form.save()
            return redirect('User#show', str(self.user.id))

        # Re-render the edit form with validation errors.
        response = self.edit.render()
        response.status_code = 403
        return response

    @action
    def destroy(self, user_id):
        """POST /users/<id>/destroy -- delete and return to the index."""
        self.user = get_object_or_404(models.User, id=int(user_id))
        self.user.delete()
        return redirect('User#index')


def json_response(data):
    """Serialize *data* to JSON in an ``application/json`` HttpResponse."""
    return HttpResponse(content=simplejson.dumps(data),
                        content_type='application/json')


def user_to_dict(user):
    """Return the JSON-safe public fields of a Django auth user."""
    return {
        "username": user.username,
        "first_name": user.first_name,
        "last_name": user.last_name
    }
unlicense
rreilink/grbl
doc/script/simple_stream.py
47
2395
#!/usr/bin/env python
"""\
Simple g-code streaming script for grbl

Provided as an illustration of the basic communication interface
for grbl. When grbl has finished parsing the g-code block, it will
return an 'ok' or 'error' response. When the planner buffer is full,
grbl will not send a response until the planner buffer clears space.

G02/03 arcs are special exceptions, where they inject short line
segments directly into the planner. So there may not be a response
from grbl for the duration of the arc.

---------------------
The MIT License (MIT)

Copyright (c) 2012 Sungeun K. Jeon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---------------------
"""
# Ported to Python 3: the original used Python-2-only `print` statements
# and `raw_input`, which are syntax errors on any modern interpreter,
# and pySerial 3.x requires `bytes` (not `str`) for Serial.write().

import serial
import time

# Open grbl serial port
s = serial.Serial('/dev/tty.usbmodem1811', 115200)

# Open g-code file
f = open('grbl.gcode', 'r')

# Wake up grbl
s.write(b"\r\n\r\n")
time.sleep(2)   # Wait for grbl to initialize
s.flushInput()  # Flush startup text in serial input

# Stream g-code to grbl
for line in f:
    l = line.strip()  # Strip all EOL characters for consistency
    print('Sending: ' + l, end='')
    s.write((l + '\n').encode('ascii'))  # Send g-code block to grbl
    # Wait for grbl response with carriage return
    grbl_out = s.readline().decode('ascii', errors='replace')
    print(' : ' + grbl_out.strip())

# Wait here until grbl is finished to close serial port and file.
input("  Press <Enter> to exit and disable grbl.")

# Close file and serial port
f.close()
s.close()
gpl-3.0
gautam1858/tensorflow
tensorflow/contrib/model_pruning/examples/cifar10/cifar10_train.py
60
5267
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train pruned CIFAR-10 using a single GPU.

Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py when target sparsity in
cifar10_pruning_spec.pbtxt is set to zero

Results:
Sparsity | Accuracy after 150K steps
-------- | -------------------------
0%       | 86%
50%      | 86%
75%      | TODO(suyoggupta)
90%      | TODO(suyoggupta)
95%      | 77%

Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import datetime
import sys
import time

import tensorflow as tf

from tensorflow.contrib.model_pruning.examples.cifar10 import cifar10_pruning as cifar10
from tensorflow.contrib.model_pruning.python import pruning

# Populated from the command line in the __main__ block below.
FLAGS = None


def train():
  """Train CIFAR-10 for a number of steps."""
  with tf.Graph().as_default():
    global_step = tf.contrib.framework.get_or_create_global_step()

    # Get images and labels for CIFAR-10.
    images, labels = cifar10.distorted_inputs()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = cifar10.inference(images)

    # Calculate loss.
    loss = cifar10.loss(logits, labels)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = cifar10.train(loss, global_step)

    # Parse pruning hyperparameters
    pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)

    # Create a pruning object using the pruning hyperparameters
    pruning_obj = pruning.Pruning(pruning_hparams, global_step=global_step)

    # Use the pruning_obj to add ops to the training graph to update the masks
    # The conditional_mask_update_op will update the masks only when the
    # training step is in [begin_pruning_step, end_pruning_step] specified in
    # the pruning spec proto
    mask_update_op = pruning_obj.conditional_mask_update_op()

    # Use the pruning_obj to add summaries to the graph to track the sparsity
    # of each of the layers
    pruning_obj.add_pruning_summaries()

    class _LoggerHook(tf.train.SessionRunHook):
      """Logs loss and runtime."""

      def begin(self):
        # Start at -1 so the first before_run() brings the counter to 0.
        self._step = -1

      def before_run(self, run_context):
        self._step += 1
        self._start_time = time.time()
        return tf.train.SessionRunArgs(loss)  # Asks for loss value.

      def after_run(self, run_context, run_values):
        # Log throughput and loss every 10 steps.
        duration = time.time() - self._start_time
        loss_value = run_values.results
        if self._step % 10 == 0:
          num_examples_per_step = 128
          examples_per_sec = num_examples_per_step / duration
          sec_per_batch = float(duration)

          format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch)')
          print(format_str % (datetime.datetime.now(), self._step, loss_value,
                              examples_per_sec, sec_per_batch))

    # MonitoredTrainingSession handles checkpointing, summaries and
    # stopping (via StopAtStepHook / NanTensorHook) automatically.
    with tf.train.MonitoredTrainingSession(
        checkpoint_dir=FLAGS.train_dir,
        hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
               tf.train.NanTensorHook(loss),
               _LoggerHook()],
        config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement)) as mon_sess:
      while not mon_sess.should_stop():
        mon_sess.run(train_op)
        # Update the masks
        mon_sess.run(mask_update_op)


def main(argv=None):  # pylint: disable=unused-argument
  """Download data if needed, reset the train dir and run training."""
  cifar10.maybe_download_and_extract()
  # NOTE: any previous training run in train_dir is deleted.
  if tf.gfile.Exists(FLAGS.train_dir):
    tf.gfile.DeleteRecursively(FLAGS.train_dir)
  tf.gfile.MakeDirs(FLAGS.train_dir)
  train()


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--train_dir',
      type=str,
      default='/tmp/cifar10_train',
      help='Directory where to write event logs and checkpoint.')
  parser.add_argument(
      '--pruning_hparams',
      type=str,
      default='',
      help="""Comma separated list of pruning-related hyperparameters""")
  parser.add_argument(
      '--max_steps',
      type=int,
      default=1000000,
      help='Number of batches to run.')
  parser.add_argument(
      '--log_device_placement',
      type=bool,
      default=False,
      help='Whether to log device placement.')

  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
prasanna08/oppia
core/domain/interaction_jobs_one_off.py
1
7849
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""One-off jobs for interaction validation."""

from __future__ import absolute_import  # pylint: disable=import-only-modules
from __future__ import unicode_literals  # pylint: disable=import-only-modules

from core import jobs
from core.domain import customization_args_util
from core.domain import exp_fetchers
from core.domain import interaction_registry
from core.domain import rights_domain
from core.domain import rights_manager
from core.platform import models
import python_utils

(exp_models,) = models.Registry.import_models([
    models.NAMES.exploration])


class DragAndDropSortInputInteractionOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that produces a list of all (exploration, state) pairs that use the
    DragAndDropSortInput interaction and have invalid choices.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        # `item` is an ExplorationModel; yields one
        # (exploration id, error list) pair per invalid exploration.
        if item.deleted:
            return

        # Only audit publicly visible explorations.
        exp_status = rights_manager.get_exploration_rights(item.id).status
        if exp_status == rights_domain.ACTIVITY_STATUS_PRIVATE:
            return
        exploration = exp_fetchers.get_exploration_from_model(item)
        validation_errors = []
        for state_name, state in exploration.states.items():
            if state.interaction.id == 'DragAndDropSortInput':
                for answer_group_index, answer_group in enumerate(
                        state.interaction.answer_groups):
                    for rule_index, rule_spec in enumerate(
                            answer_group.rule_specs):
                        for rule_input in rule_spec.inputs:
                            value = rule_spec.inputs[rule_input]
                            # An empty string/list rule input is invalid.
                            if value == '' or value == []:
                                validation_errors.append(
                                    'State name: %s, AnswerGroup: %s,' % (
                                        state_name, answer_group_index) +
                                    ' Rule input %s in rule with index %s'
                                    ' is empty. ' % (rule_input, rule_index))
        if validation_errors:
            yield (item.id, validation_errors)

    @staticmethod
    def reduce(key, values):
        # Pass through: group all error lists under the exploration id.
        yield (key, values)


class MultipleChoiceInteractionOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that produces a list of all (exploration, state) pairs that use the
    Multiple selection interaction and have rules that do not correspond to any
    answer choices.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        if item.deleted:
            return
        exploration = exp_fetchers.get_exploration_from_model(item)
        for state_name, state in exploration.states.items():
            if state.interaction.id == 'MultipleChoiceInput':
                choices_length = len(
                    state.interaction.customization_args['choices'].value)
                for answer_group_index, answer_group in enumerate(
                        state.interaction.answer_groups):
                    for rule_index, rule_spec in enumerate(
                            answer_group.rule_specs):
                        # A rule referring to a choice index beyond the
                        # choices list can never match a real answer.
                        if rule_spec.inputs['x'] >= choices_length:
                            yield (
                                item.id,
                                'State name: %s, AnswerGroup: %s,' % (
                                    state_name.encode('utf-8'),
                                    answer_group_index) +
                                ' Rule: %s is invalid.' % (rule_index) +
                                '(Indices here are 0-indexed.)')

    @staticmethod
    def reduce(key, values):
        yield (key, values)


class ItemSelectionInteractionOneOffJob(jobs.BaseMapReduceOneOffJobManager):
    """Job that produces a list of (exploration, state) pairs that use the item
    selection interaction and that have rules that do not match the answer
    choices. These probably need to be fixed manually.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        if item.deleted:
            return
        exploration = exp_fetchers.get_exploration_from_model(item)
        for state_name, state in exploration.states.items():
            if state.interaction.id == 'ItemSelectionInput':
                choices = [
                    choice.html
                    for choice in state.interaction.customization_args[
                        'choices'].value
                ]
                for group in state.interaction.answer_groups:
                    for rule_spec in group.rule_specs:
                        for rule_item in rule_spec.inputs['x']:
                            # Every selected item must be one of the
                            # configured choices.
                            if rule_item not in choices:
                                yield (
                                    item.id,
                                    '%s: %s' % (
                                        state_name.encode('utf-8'),
                                        rule_item.encode('utf-8')))

    @staticmethod
    def reduce(key, values):
        yield (key, values)


class InteractionCustomizationArgsValidationOneOffJob(
        jobs.BaseMapReduceOneOffJobManager):
    """Job that produces a list of (exploration, state) pairs and validates
    customization args for all interactions.
    """

    @classmethod
    def entity_classes_to_map_over(cls):
        return [exp_models.ExplorationModel]

    @staticmethod
    def map(item):
        if item.deleted:
            return
        exploration = exp_fetchers.get_exploration_from_model(item)
        error_messages = []
        for _, state in exploration.states.items():
            if state.interaction.id is None:
                continue
            try:
                ca_specs = (
                    interaction_registry.Registry.get_interaction_by_id(
                        state.interaction.id).customization_arg_specs
                )

                customization_args_dict = {}
                for ca_name in state.interaction.customization_args:
                    customization_args_dict[ca_name] = (
                        state.interaction.customization_args[
                            ca_name].to_customization_arg_dict()
                    )

                customization_args_util.validate_customization_args_and_values(
                    'interaction',
                    state.interaction.id,
                    customization_args_dict,
                    ca_specs,
                    fail_on_validation_errors=True
                )
            except Exception as e:
                # Collect the failure per interaction rather than aborting
                # the whole exploration audit.
                error_messages.append(
                    '%s: %s' % (state.interaction.id, python_utils.UNICODE(e)))

        if error_messages:
            yield (
                'Failed customization args validation for exp '
                'id %s' % item.id, ', '.join(error_messages))

    @staticmethod
    def reduce(key, values):
        yield (key, values)
apache-2.0
sammyshj/gci
modules/s3/s3validators.py
2
119990
# -*- coding: utf-8 -*- """ Custom Validators @requires: U{B{I{gluon}} <http://web2py.com>} @copyright: (c) 2010-2013 Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ["single_phone_number_pattern", "multi_phone_number_pattern", "s3_single_phone_requires", "s3_phone_requires", "IS_ACL", "IS_ADD_PERSON_WIDGET", "IS_ADD_PERSON_WIDGET2", "IS_COMBO_BOX", "IS_FLOAT_AMOUNT", "IS_INT_AMOUNT", "IS_IN_SET_LAZY", "IS_HTML_COLOUR", "IS_LAT", "IS_LON", "IS_LOCATION", "IS_LOCATION_SELECTOR", "IS_LOCATION_SELECTOR2", "IS_ONE_OF", "IS_ONE_OF_EMPTY", "IS_ONE_OF_EMPTY_SELECT", "IS_NOT_ONE_OF", "IS_PROCESSED_IMAGE", "IS_SITE_SELECTOR", "IS_UTC_DATETIME", "IS_UTC_OFFSET", "QUANTITY_INV_ITEM", ] import re import time from datetime import datetime, timedelta try: import json # try stdlib (Python 2.6) except ImportError: try: import simplejson as json # try external module except: import gluon.contrib.simplejson as json # fallback to pure-Python module from gluon import * #from gluon import current #from gluon.dal import Field #from gluon.validators import IS_DATE_IN_RANGE, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE, IS_EMAIL from gluon.languages import lazyT from gluon.storage import Storage from gluon.validators import Validator from s3utils import S3DateTime, s3_orderby_fields, s3_unicode def translate(text): if text is None: return None elif isinstance(text, (str, unicode)): from globals import current if hasattr(current, "T"): return str(current.T(text)) return str(text) def options_sorter(x, y): return (s3_unicode(x[1]).upper() > s3_unicode(y[1]).upper() and 1) or -1 # ----------------------------------------------------------------------------- # Phone number requires # Multiple phone numbers can be separated by comma, slash, semi-colon. # (Semi-colon appears in Brazil OSM data.) # @ToDo: Need to beware of separators used inside phone numbers # (e.g. 555-1212, ext 9), so may need fancier validation if we see that. # @ToDo: Add tooltip giving list syntax, and warning against above. # (Current use is in importing OSM files, so isn't interactive.) 
# @ToDo: Code that should only have a single # should use # s3_single_phone_requires. Check what messaging assumes. phone_number_pattern = "\+?\s*[\s\-\.\(\)\d]+(?:(?: x| ext)\s?\d{1,5})?" single_phone_number_pattern = "%s$" % phone_number_pattern multi_phone_number_pattern = "%s(\s*(,|/|;)\s*%s)*$" % (phone_number_pattern, phone_number_pattern) s3_single_phone_requires = IS_MATCH(single_phone_number_pattern) s3_phone_requires = IS_MATCH(multi_phone_number_pattern, error_message=current.T("Invalid phone number!")) # ============================================================================= class IS_LAT(object): """ example: INPUT(_type="text", _name="name", requires=IS_LAT()) Latitude has to be in decimal degrees between -90 & 90 - we attempt to convert DMS format into decimal degrees """ def __init__(self, error_message = "Latitude/Northing should be between -90 & 90!" ): self.minimum = -90 self.maximum = 90 self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) else: return (value, self.error_message) except: pattern = re.compile("^[0-9]{,3}[\D\W][0-9]{,3}[\D\W][0-9]+$") if not pattern.match(value): return (value, self.error_message) else: val = [] val.append(value) sep = [] count = 0 for i in val[0]: try: int(i) count += 1 except: sep.append(count) count += 1 sec = "" posn = sep[1] while posn != (count-1): sec = sec + val[0][posn+1]#to join the numbers for seconds posn += 1 posn2 = sep[0] mins = "" while posn2 != (sep[1]-1): mins = mins + val[0][posn2+1]# to join the numbers for minutes posn2 += 1 deg = "" posn3 = 0 while posn3 != (sep[0]): deg = deg + val[0][posn3] # to join the numbers for degree posn3 += 1 e = int(sec)/60 #formula to get back decimal degree f = int(mins) + e #formula g = 
int(f) / 60 #formula value = int(deg) + g return (value, None) # ============================================================================= class IS_LON(object): """ example: INPUT(_type="text", _name="name", requires=IS_LON()) Longitude has to be in decimal degrees between -180 & 180 - we attempt to convert DMS format into decimal degrees """ def __init__(self, error_message = "Longitude/Easting should be between -180 & 180!" ): self.minimum = -180 self.maximum = 180 self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) else: return (value, self.error_message) except: pattern = re.compile("^[0-9]{,3}[\D\W][0-9]{,3}[\D\W][0-9]+$") if not pattern.match(value): return (value, self.error_message) else: val = [] val.append(value) sep = [] count = 0 for i in val[0]: try: int(i) count += 1 except: sep.append(count) count += 1 sec = "" posn = sep[1] while posn != (count-1): sec = sec + val[0][posn+1]#to join the numbers for seconds posn += 1 posn2 = sep[0] mins = "" while posn2 != (sep[1]-1): mins = mins + val[0][posn2+1]# to join the numbers for minutes posn2 += 1 deg = "" posn3 = 0 while posn3 != (sep[0]): deg = deg + val[0][posn3] # to join the numbers for degree posn3 += 1 e = int(sec)/60 #formula to get back decimal degree f = int(mins) + e #formula g = int(f) / 60 #formula value = int(deg) + g return (value, None) # ============================================================================= class IS_NUMBER(object): """ Used by s3data.py to wrap IS_INT_AMOUNT & IS_LOAT_AMOUNT """ # ------------------------------------------------------------------------- @staticmethod def represent(number, precision=2): if number is None: return "" if isinstance(number, int): return IS_INT_AMOUNT.represent(number) 
elif isinstance(number, float): return IS_FLOAT_AMOUNT.represent(number, precision) else: return number # ============================================================================= class IS_INT_AMOUNT(IS_INT_IN_RANGE): """ Validation, widget and representation of integer-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None): IS_INT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message) # ------------------------------------------------------------------------- def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, "") return IS_INT_IN_RANGE.__call__(self, value) # ------------------------------------------------------------------------- @staticmethod def represent(number): """ Change the format of the number depending on the language Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py """ if number is None: return "" try: intnumber = int(number) except: intnumber = number settings = current.deployment_settings THOUSAND_SEPARATOR = settings.get_L10n_thousands_separator() NUMBER_GROUPING = settings.get_L10n_thousands_grouping() # The negative/positive sign for the number if float(number) < 0: sign = "-" else: sign = "" str_number = unicode(intnumber) if str_number[0] == "-": str_number = str_number[1:] # Walk backwards over the integer part, inserting the separator as we go int_part_gd = "" for cnt, digit in enumerate(str_number[::-1]): if cnt and not cnt % NUMBER_GROUPING: int_part_gd += THOUSAND_SEPARATOR int_part_gd += digit int_part = int_part_gd[::-1] return sign + int_part # ------------------------------------------------------------------------- @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "integer"]) _class = "%s int_amount" % classes 
attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ============================================================================= class IS_FLOAT_AMOUNT(IS_FLOAT_IN_RANGE): """ Validation, widget and representation of float-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None, dot="."): IS_FLOAT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message, dot=dot) # ------------------------------------------------------------------------- def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, "") return IS_FLOAT_IN_RANGE.__call__(self, value) # ------------------------------------------------------------------------- @staticmethod def represent(number, precision=None): """ Change the format of the number depending on the language Based on https://code.djangoproject.com/browser/django/trunk/django/utils/numberformat.py """ if number is None: return "" DECIMAL_SEPARATOR = current.deployment_settings.get_L10n_decimal_separator() str_number = unicode(number) if "." 
in str_number: int_part, dec_part = str_number.split(".") if precision is not None: dec_part = dec_part[:precision] else: int_part, dec_part = str_number, "" if int(dec_part) == 0: dec_part = "" elif precision is not None: dec_part = dec_part + ("0" * (precision - len(dec_part))) if dec_part: dec_part = DECIMAL_SEPARATOR + dec_part int_part = IS_INT_AMOUNT.represent(int(int_part)) return int_part + dec_part # ------------------------------------------------------------------------- @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "double"]) _class = "%s float_amount" % classes attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ============================================================================= class IS_HTML_COLOUR(IS_MATCH): """ example:: INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR()) """ def __init__(self, error_message="must be a 6 digit hex code! (format: rrggbb)" ): IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message) # ============================================================================= regex1 = re.compile("[\w_]+\.[\w_]+") regex2 = re.compile("%\((?P<name>[^\)]+)\)s") class IS_ONE_OF_EMPTY(Validator): """ Filtered version of IS_IN_DB(): validates a given value as key of another table, filtered by the 'filterby' field for one of the 'filter_opts' options (=a selective IS_IN_DB()) NB Filtering isn't active in GQL. For the dropdown representation: 'label' can be a string template for the record, or a set of field names of the fields to be used as option labels, or a function or lambda to create an option label from the respective record (which has to return a string, of course). The function will take the record as an argument. No 'options' method as designed to be called next to an Autocomplete field so don't download a large dropdown unnecessarily. 
""" def __init__(self, dbset, field, label=None, filterby=None, filter_opts=None, not_filterby=None, not_filter_opts=None, realms=None, updateable=False, instance_types=None, error_message="invalid value!", orderby=None, groupby=None, left=None, multiple=False, zero="", sort=True, _and=None, ): """ Validator for foreign keys. @param dbset: a Set of records like db(query), or db itself @param field: the field in the referenced table @param label: lookup method for the label corresponding a value, alternatively a string template to be filled with values from the record @param filterby: a field in the referenced table to filter by @param filter_opts: values for the filterby field which indicate records to include @param not_filterby: a field in the referenced table to filter by @param not_filter_opts: values for not_filterby field which indicate records to exclude @param realms: only include records belonging to the listed realms (if None, all readable records will be included) @param updateable: only include records in the referenced table which can be updated by the user (if False, all readable records will be included) @param instance_types: if the referenced table is a super-entity, then only include these instance types (this parameter is required for super entity lookups!) 
@param error_message: the error message to return for failed validation @param orderby: orderby for the options @param groupby: groupby for the options @param left: additional left joins required for the options lookup (super-entity instance left joins will be included automatically) @param multiple: allow multiple values (for list:reference types) @param zero: add this as label for the None-option (allow selection of "None") @param sort: sort options alphabetically by their label @param _and: internal use """ if hasattr(dbset, "define_table"): self.dbset = dbset() else: self.dbset = dbset (ktable, kfield) = str(field).split(".") if not label: label = "%%(%s)s" % kfield if isinstance(label, str): if regex1.match(str(label)): label = "%%(%s)s" % str(label).split(".")[-1] ks = regex2.findall(label) if not kfield in ks: ks += [kfield] fields = ["%s.%s" % (ktable, k) for k in ks] elif hasattr(label, "bulk"): # S3Represent ks = [kfield] if label.custom_lookup: # Represent uses a custom lookup, so we only # retrieve the keys here fields = [kfield] orderby = field else: # Represent uses a standard field lookup, so # we can do that right here label._setup() fields = list(label.fields) if kfield not in fields: fields.insert(0, kfield) # Unlikely, but possible: represent and validator # using different keys - commented for now for # performance reasons (re-enable if ever necessary) #key = label.key #if key and key not in fields: #fields.insert(0, key) else: ks = [kfield] try: table = current.s3db[ktable] fields =[str(f) for f in table if f.name not in ("wkt", "the_geom")] except RuntimeError: fields = "all" self.fields = fields self.label = label self.ktable = ktable if not kfield or not len(kfield): self.kfield = "id" else: self.kfield = kfield self.ks = ks self.error_message = error_message self.theset = None self.orderby = orderby self.groupby = groupby self.left = left self.multiple = multiple self.zero = zero self.sort = sort self._and = _and self.filterby = filterby 
self.filter_opts = filter_opts self.not_filterby = not_filterby self.not_filter_opts = not_filter_opts self.realms = realms self.updateable = updateable self.instance_types = instance_types # ------------------------------------------------------------------------- def set_self_id(self, id): if self._and: self._and.record_id = id # ------------------------------------------------------------------------- def set_filter(self, filterby = None, filter_opts = None, not_filterby = None, not_filter_opts = None): """ This can be called from prep to apply a filter base on data in the record or the primary resource id. """ if filterby: self.filterby = filterby if filter_opts: self.filter_opts = filter_opts if not_filterby: self.not_filterby = not_filterby if not_filter_opts: self.not_filter_opts = not_filter_opts # ------------------------------------------------------------------------- def build_set(self): dbset = self.dbset db = dbset._db ktablename = self.ktable if ktablename not in db: table = current.s3db.table(ktablename, db_only=True) else: table = db[ktablename] if table: if self.fields == "all": fields = [table[f] for f in table.fields if f not in ("wkt", "the_geom")] else: fieldnames = [f.split(".")[1] if "." in f else f for f in self.fields] fields = [table[k] for k in fieldnames if k in table.fields] if db._dbname not in ("gql", "gae"): orderby = self.orderby or reduce(lambda a, b: a|b, fields) groupby = self.groupby # Caching breaks Colorbox dropdown refreshes #dd = dict(orderby=orderby, groupby=groupby, cache=(current.cache.ram, 60)) dd = dict(orderby=orderby, groupby=groupby) method = "update" if self.updateable else "read" query, left = self.accessible_query(method, table, instance_types=self.instance_types) if "deleted" in table: query &= (table["deleted"] != True) # Realms filter? 
if self.realms: auth = current.auth if auth.is_logged_in() and \ auth.get_system_roles().ADMIN in auth.user.realms: # Admin doesn't filter pass else: query &= auth.permission.realm_query(table, self.realms) all_fields = [str(f) for f in fields] filterby = self.filterby if filterby and filterby in table: filter_opts = self.filter_opts if filter_opts: if None in filter_opts: # Needs special handling (doesn't show up in 'belongs') _query = (table[filterby] == None) filter_opts = [f for f in filter_opts if f is not None] if filter_opts: _query = _query | (table[filterby].belongs(filter_opts)) query &= _query else: query &= (table[filterby].belongs(filter_opts)) if not self.orderby: filterby_field = table[filterby] dd.update(orderby=filterby_field) if str(filterby_field) not in all_fields: fields.append(filterby_field) all_fields.append(str(filterby_field)) not_filterby = self.not_filterby if not_filterby and not_filterby in table: not_filter_opts = self.not_filter_opts if not_filter_opts: if None in not_filter_opts: # Needs special handling (doesn't show up in 'belongs') _query = (table[not_filterby] == None) not_filter_opts = [f for f in not_filter_opts if f is not None] if not_filter_opts: _query = _query | (table[not_filterby].belongs(not_filter_opts)) query &= (~_query) else: query &= (~(table[not_filterby].belongs(not_filter_opts))) if not self.orderby: filterby_field = table[not_filterby] dd.update(orderby=filterby_field) if str(filterby_field) not in all_fields: fields.append(filterby_field) all_fields.append(str(filterby_field)) if left is not None: if self.left is not None: if not isinstance(left, list): left = [left] ljoins = [str(join) for join in self.left] for join in left: ljoin = str(join) if ljoin not in ljoins: self.left.append(join) ljoins.append(ljoin) else: self.left = left if self.left is not None: dd.update(left=self.left) # Make sure we have all ORDERBY fields in the query # (otherwise postgresql will complain) fieldnames = [str(f) for f in 
fields] for f in s3_orderby_fields(table, dd.get("orderby")): if str(f) not in fieldnames: fields.append(f) fieldnames.append(str(f)) records = dbset(query).select(distinct=True, *fields, **dd) else: # Note this does not support filtering. orderby = self.orderby or \ reduce(lambda a, b: a|b, (f for f in fields if f.type != "id")) # Caching breaks Colorbox dropdown refreshes #dd = dict(orderby=orderby, cache=(current.cache.ram, 60)) dd = dict(orderby=orderby) records = dbset.select(db[self.ktable].ALL, **dd) self.theset = [str(r[self.kfield]) for r in records] label = self.label try: # Is callable if hasattr(label, "bulk"): # S3Represent => use bulk option d = label.bulk(None, rows=records, list_type=False, show_link=False) labels = [d.get(r[self.kfield], d[None]) for r in records] else: # Standard representation function labels = map(label, records) except TypeError: if isinstance(label, str): labels = map(lambda r: label % dict(r), records) elif isinstance(label, (list, tuple)): labels = map(lambda r: \ " ".join([r[l] for l in label if l in r]), records) elif "name" in table: labels = map(lambda r: r.name, records) else: labels = map(lambda r: r[self.kfield], records) self.labels = labels if labels and self.sort: items = zip(self.theset, self.labels) # Alternative variant that handles generator objects, # doesn't seem necessary, retained here just in case: #orig_labels = self.labels #orig_theset = self.theset #items = [] #for i in xrange(len(orig_theset)): #label = orig_labels[i] ##if hasattr(label, "flatten"): ##try: ##label = label.flatten() ##except: ##pass #items.append((orig_theset[i], label)) items.sort(key=lambda item: s3_unicode(item[1]).lower()) self.theset, self.labels = zip(*items) else: self.theset = None self.labels = None # ------------------------------------------------------------------------- @classmethod def accessible_query(cls, method, table, instance_types=None): """ Returns an accessible query (and left joins, if necessary) for records in 
table the user is permitted to access with method @param method: the method (e.g. "read" or "update") @param table: the table @param instance_types: list of instance tablenames, if table is a super-entity (required in this case!) @return: tuple (query, left) where query is the query and left joins is the list of left joins required for the query @note: for higher security policies and super-entities with many instance types this can give a very complex query. Try to always limit the instance types to what is really needed """ DEFAULT = (table._id > 0) left = None if "instance_type" in table: # Super-entity if not instance_types: return DEFAULT, left query = None auth = current.auth s3db = current.s3db for instance_type in instance_types: itable = s3db.table(instance_type) if itable is None: continue join = itable.on(itable[table._id.name] == table._id) if left is None: left = [join] else: left.append(join) q = (itable._id != None) & \ auth.s3_accessible_query(method, itable) if "deleted" in itable: q &= itable.deleted != True if query is None: query = q else: query |= q if query is None: query = DEFAULT else: query = current.auth.s3_accessible_query(method, table) return query, left # ------------------------------------------------------------------------- # Removed as we don't want any options downloaded unnecessarily #def options(self): # ------------------------------------------------------------------------- def __call__(self, value): try: dbset = self.dbset table = dbset._db[self.ktable] deleted_q = ("deleted" in table) and (table["deleted"] == False) or False filter_opts_q = False filterby = self.filterby if filterby and filterby in table: filter_opts = self.filter_opts if filter_opts: if None in filter_opts: # Needs special handling (doesn't show up in 'belongs') filter_opts_q = (table[filterby] == None) filter_opts = [f for f in filter_opts if f is not None] if filter_opts: filter_opts_q |= (table[filterby].belongs(filter_opts)) else: filter_opts_q = 
(table[filterby].belongs(filter_opts)) if self.multiple: if isinstance(value, list): values = [str(v) for v in value] elif isinstance(value, basestring) and \ value[0] == "|" and value[-1] == "|": values = value[1:-1].split("|") elif value: values = [value] else: values = [] if self.theset: if not [x for x in values if not x in self.theset]: return (values, None) else: return (value, self.error_message) else: field = table[self.kfield] query = None for v in values: q = (field == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if dbset(query).count() < 1: return (value, self.error_message) return (values, None) elif self.theset: if str(value) in self.theset: if self._and: return self._and(value) else: return (value, None) else: values = [value] query = None for v in values: q = (table[self.kfield] == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if dbset(query).count(): if self._and: return self._and(value) else: return (value, None) except: pass return (value, self.error_message) # ============================================================================= class IS_ONE_OF(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by restoring the 'options' method. 
""" def options(self, zero=True): self.build_set() theset, labels = self.theset, self.labels if theset is None or labels is None: items = [] else: items = zip(theset, labels) if zero and self.zero is not None and not self.multiple: items.insert(0, ("", self.zero)) return items # ============================================================================= class IS_ONE_OF_EMPTY_SELECT(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by displaying an empty SELECT (instead of INPUT) """ def options(self, zero=True): return [("", "")] # ============================================================================= class IS_NOT_ONE_OF(IS_NOT_IN_DB): """ Filtered version of IS_NOT_IN_DB() - understands the 'deleted' field. - makes the field unique (amongst non-deleted field) Example: - INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table)) """ def __call__(self, value): value = str(value) if not value.strip(): return (value, translate(self.error_message)) if value in self.allowed_override: return (value, None) (tablename, fieldname) = str(self.field).split(".") dbset = self.dbset table = dbset.db[tablename] field = table[fieldname] query = (field == value) if "deleted" in table: query = (table["deleted"] == False) & query rows = dbset(query).select(limitby=(0, 1)) if len(rows) > 0: if isinstance(self.record_id, dict): for f in self.record_id: if str(getattr(rows[0], f)) != str(self.record_id[f]): return (value, translate(self.error_message)) elif str(rows[0][table._id.name]) != str(self.record_id): return (value, translate(self.error_message)) return (value, None) # ============================================================================= class IS_LOCATION(Validator): """ Allow all locations, or locations by level. 
""" def __init__(self, level = None, error_message = None ): self.level = level # can be a List or a single element self.error_message = error_message # Make it like IS_ONE_OF to support AddResourceLink self.ktable = "gis_location" self.kfield = "id" # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): level = self.level if level == "L0": # Use cached countries. This returns name if id is for a country. try: location_id = int(value) except ValueError: ok = False else: ok = current.gis.get_country(location_id) else: db = current.db table = db.gis_location query = (table.id == value) & (table.deleted == False) if level: if not hasattr(level, "strip") and \ (hasattr(level, "__getitem__") or \ hasattr(level, "__iter__")): # List or Tuple if None in level: # None needs special handling level = [l for l in level if l is not None] query &= ((table.level.belongs(level)) | \ (table.level == None)) else: query &= (table.level.belongs(level)) else: query &= (table.level == level) ok = db(query).select(table.id, limitby=(0, 1)) if ok: return (value, None) else: return (value, self.error_message or current.T("Invalid Location!")) # ============================================================================= class IS_LOCATION_SELECTOR(Validator): """ Designed for use within the S3LocationSelectorWidget. 
For Create forms, this will create a new location from the additional fields For Update forms, this will check that we have a valid location_id FK and update any changes @ToDo: Audit """ def __init__(self, error_message = None, ): self.error_message = error_message self.errors = Storage() self.id = None # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) db = current.db table = db.gis_location if value == "dummy": # Create form if not current.auth.s3_has_permission("create", table): return (None, current.auth.messages.access_denied) location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (None, error) if location.name or location.lat or location.lon or location.wkt or \ location.street or location.postcode or location.parent: vars = dict(name = location.name, lat = location.lat, lon = location.lon, wkt = location.wkt, gis_feature_type = location.gis_feature_type, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, lon_min = location.lon_min, lon_max = location.lon_max, lat_min = location.lat_min, lat_max = location.lat_max ) if vars["wkt"] and current.deployment_settings.get_gis_spatialdb(): # Also populate the spatial field vars["the_geom"] = vars["wkt"] value = table.insert(**vars) # onaccept vars["id"] = value current.gis.update_location_tree(vars) return (value, None) else: return (None, None) else: # This must be an Update form if not current.auth.s3_has_permission("update", table, record_id=value): return (value, current.auth.messages.access_denied) # Check that this is a valid location_id query = (table.id == value) & \ (table.deleted == False) & \ (table.level == None) # NB Specific 
Locations only location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Update the record, in case changes have been made self.id = value location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (value, error) vars = dict(name = location.name, lat = location.lat, lon = location.lon, inherited = location.inherited, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = location.wkt, lon_min = location.lon_min, lon_max = location.lon_max, lat_min = location.lat_min, lat_max = location.lat_max ) if vars["wkt"] and current.deployment_settings.get_gis_spatialdb(): # Also populate the spatial field vars["the_geom"] = vars["wkt"] db(table.id == value).update(**vars) # onaccept vars["id"] = value current.gis.update_location_tree(vars) return (value, None) else: return (value, self.error_message or current.T("Invalid Location!")) # ------------------------------------------------------------------------- def _process_values(self): """ Read the request.vars & prepare for a record insert/update Note: This is also used by IS_SITE_SELECTOR() """ # Rough check for valid Lat/Lon (detailed later) vars = current.request.vars lat = vars.get("gis_location_lat", None) lon = vars.get("gis_location_lon", None) if lat: try: lat = float(lat) except ValueError: self.errors["lat"] = current.T("Latitude is Invalid!") if lon: try: lon = float(lon) except ValueError: self.errors["lon"] = current.T("Longitude is Invalid!") if self.errors: return None L0 = vars.get("gis_location_L0", None) db = current.db table = db.gis_location # Are we allowed to create Locations? auth = current.auth def permitted_to_create(): if not auth.s3_has_permission("create", table): self.errors["location_id"] = auth.messages.access_denied return False return True # What level of hierarchy are we allowed to edit? 
s3db = current.s3db if auth.s3_has_role(current.session.s3.system_roles.MAP_ADMIN): # 'MapAdmin' always has permission to edit hierarchy locations L1_allowed = True L2_allowed = True L3_allowed = True L4_allowed = True L5_allowed = True else: if L0: htable = s3db.gis_hierarchy query = (htable.location_id == L0) config = db(query).select(htable.edit_L1, htable.edit_L2, htable.edit_L3, htable.edit_L4, htable.edit_L5, limitby=(0, 1)).first() if L0 and config: # Lookup each level individually L1_allowed = config.edit_L1 L2_allowed = config.edit_L2 L3_allowed = config.edit_L3 L4_allowed = config.edit_L4 L5_allowed = config.edit_L5 else: # default is True L1_allowed = True L2_allowed = True L3_allowed = True L4_allowed = True L5_allowed = True # We don't need to do onvalidation of the Location Hierarchy records # separately as we don't have anything extra to validate than we have # done already onaccept = current.gis.update_location_tree L1 = vars.get("gis_location_L1", None) L2 = vars.get("gis_location_L2", None) L3 = vars.get("gis_location_L3", None) L4 = vars.get("gis_location_L4", None) L5 = vars.get("gis_location_L5", None) # Check if we have parents to create # L1 if L1: try: # Is this an ID? L1 = int(L1) # Do we need to update it's parent? 
if L0: location = db(table.id == L1).select(table.name, table.parent, limitby=(0, 1) ).first() if location and (location.parent != int(L0)): db(query).update(parent = L0) location["level"] = "L1" location["id"] = L1 onaccept(location) except: # Name # Test for duplicates query = (table.name == L1) & (table.level == "L1") if L0: query &= (table.parent == L0) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L1 = location.id elif L1_allowed: if permitted_to_create(): if L0: f = dict(name = L1, level = "L1", parent = L0, ) L1 = table.insert(**f) f["id"] = L1 onaccept(f) else: f = dict(name=L1, level="L1", ) L1 = table.insert(**f) f["id"] = L1 onaccept(f) else: return None else: L1 = None # L2 if L2: try: # Is this an ID? L2 = int(L2) # Do we need to update it's parent? if L1: location = db(table.id == L2).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L1): db(query).update(parent=L1) location["level"] = "L2" location["id"] = L2 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L2 parenting direct to L0 query = (table.name == L2) & (table.level == "L2") if L1: query &= (table.parent == L1) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L2 = location.id elif L2_allowed: if permitted_to_create(): if L1: f = dict(name=L2, level="L2", parent=L1, ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) elif L0: f = dict(name=L2, level="L2", parent=L0, ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) else: f = dict(name=L2, level="L2", ) L2 = table.insert(**f) f["id"] = L2 onaccept(f) else: return None else: L2 = None # L3 if L3: try: # Is this an ID? L3 = int(L3) # Do we need to update it's parent? 
if L2: location = db(table.id == L3).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L2): db(query).update(parent=L2) location["level"] = "L3" location["id"] = L3 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L3 parenting direct to L0/1 query = (table.name == L3) & (table.level == "L3") if L2: query &= (table.parent == L2) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L3 = location.id elif L3_allowed: if permitted_to_create(): if L2: f = dict(name=L3, level="L3", parent=L2, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) elif L1: f = dict(name=L3, level="L3", parent=L1, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) elif L0: f = dict(name=L3, level="L3", parent=L0, ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) else: f = dict(name=L3, level="L3", ) L3 = table.insert(**f) f["id"] = L3 onaccept(f) else: return None else: L3 = None # L4 if L4: try: # Is this an ID? L4 = int(L4) # Do we need to update it's parent? 
if L3: location = db(table.id == L4).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L3): db(query).update(parent=L3) location["level"] = "L4" location["id"] = L4 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L4 parenting direct to L0/1/2 query = (table.name == L4) & (table.level == "L4") if L3: query &= (table.parent == L3) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L4 = location.id elif L4_allowed: if permitted_to_create(): if L3: f = dict(name=L4, level="L4", parent=L3, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L2: f = dict(name=L4, level="L4", parent=L2, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L1: f = dict(name=L4, level="L4", parent=L1, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) elif L0: f = dict(name=L4, level="L4", parent=L0, ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) else: f = dict(name=L4, level="L4", ) L4 = table.insert(**f) f["id"] = L4 onaccept(f) else: return None else: L4 = None # L5 if L5: try: # Is this an ID? L5 = int(L5) # Do we need to update it's parent? 
if L4: location = db(table.id == L5).select(table.name, table.parent, limitby=(0, 1)).first() if location and (location.parent != L4): db(query).update(parent=L4) location["level"] = "L5" location["id"] = L5 onaccept(location) except: # Name # Test for duplicates # @ToDo: Also check for L5 parenting direct to L0/1/2/3 query = (table.name == L5) & (table.level == "L5") if L4: query &= (table.parent == L4) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L5 = location.id elif L5_allowed: if permitted_to_create(): if L4: f = dict(name=L5, level="L5", parent=L4, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L3: f = dict(name=L5, level="L5", parent=L3, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L2: f = dict(name=L5, level="L5", parent=L2, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L1: f = dict(name=L5, level="L5", parent=L1, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) elif L0: f = dict(name=L5, level="L5", parent=L1, ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) else: f = dict(name=L5, level="L5", ) L5 = table.insert(**f) f["id"] = L5 onaccept(f) else: return None else: L5 = None # Check if we have a specific location to create name = vars.get("gis_location_name", None) wkt = vars.get("gis_location_wkt", None) street = vars.get("gis_location_street", None) postcode = vars.get("gis_location_postcode", None) parent = L5 or L4 or L3 or L2 or L1 or L0 or None # Move vars into form. 
form = Storage() form.errors = dict() form.vars = Storage() vars = form.vars vars.lat = lat vars.lon = lon vars.wkt = wkt if wkt: # Polygon (will be corrected as-required by wkt_centroid) vars.gis_feature_type = "3" else: # Point vars.gis_feature_type = "1" vars.parent = parent if self.id: # Provide the old record to check inherited form.record = db(table.id == self.id).select(table.inherited, table.lat, table.lon, limitby=(0, 1)).first() # onvalidation s3db.gis_location_onvalidation(form) if form.errors: self.errors = form.errors return None location = Storage(name=name, lat=vars.lat, lon=vars.lon, inherited=vars.inherited, street=street, postcode=postcode, parent=parent, wkt = vars.wkt, gis_feature_type = vars.gis_feature_type, lon_min = vars.lon_min, lon_max = vars.lon_max, lat_min = vars.lat_min, lat_max = vars.lat_max ) return location # ============================================================================= class IS_LOCATION_SELECTOR2(Validator): """ Designed for use within the S3LocationSelectorWidget2. 
For Create forms, this will create a new location if there is a Lat/Lon submitted For Update forms, this will check that we have a valid location_id FK and update any changes @ToDo: Audit """ def __init__(self, levels=("L1", "L2", "L3"), error_message = None, ): self.levels = levels self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) vars = current.request.post_vars address = vars.get("address", None) postcode = vars.get("postcode", None) lat = vars.get("lat", None) if lat == "": lat = None lon = vars.get("lon", None) if lon == "": lon = None wkt = vars.get("wkt", None) if wkt == "": wkt = None parent = vars.get("parent", None) # Rough check for valid Lat/Lon errors = Storage() if lat: try: lat = float(lat) except ValueError: errors["lat"] = current.T("Latitude is Invalid!") if lon: try: lon = float(lon) except ValueError: errors["lon"] = current.T("Longitude is Invalid!") if wkt: try: from shapely.wkt import loads as wkt_loads polygon = wkt_loads(wkt) except: errors["wkt"] = current.T("WKT is Invalid!") if errors: return (value, errors) if parent or address or postcode or wkt is not None or \ (lat is not None and lon is not None): # Specific Location db = current.db table = db.gis_location if value == "dummy": # Create a new point if not current.auth.s3_has_permission("create", table): return (None, current.auth.messages.access_denied) vars = Storage(lat=lat, lon=lon, wkt=wkt, inherited=False, addr_street=address, addr_postcode=postcode, parent=parent, ) # onvalidation # - includes detailed bounds check if deployment_setting doesn't disable it form = Storage() form.errors = errors form.vars = vars current.s3db.gis_location_onvalidation(form) if form.errors: errors = form.errors error = "" for e in errors: error = 
"%s\n%s" % (error, errors[e]) if error else errors[e] return (parent, error) id = table.insert(**vars) vars.id = id # onaccept current.gis.update_location_tree(vars) return (id, None) else: # Update existing Point # Check that this is a valid location_id query = (table.id == value) & \ (table.deleted == False) & \ (table.level == None) # NB Specific Locations only location = db(query).select(table.lat, table.lon, table.wkt, table.addr_street, table.addr_postcode, table.parent, limitby=(0, 1)).first() if location: changed = False lparent = location.parent if parent and lparent: if int(parent) != int(lparent): changed = True elif parent or lparent: changed = True if not changed: addr_street = location.addr_street if address and addr_street: if address != addr_street: changed = True elif address or addr_street: changed = True if not changed: addr_postcode = location.addr_postcode if postcode and addr_postcode: if postcode != addr_postcode: changed = True elif postcode or addr_postcode: changed = True if not changed: if wkt and wkt != location.wkt: changed = True else: # Float comparisons need care - just check the 1st 5 decimal points, as that's all we care about llat = location.lat if lat is not None and llat is not None: if round(lat, 5) != round(llat, 5): changed = True elif lat is not None or llat is not None: changed = True if not changed: llon = location.lon if lon is not None and llon is not None: if round(lon, 5) != round(llon, 5): changed = True elif lon is not None or llon is not None: changed = True if changed: # Update the record if not current.auth.s3_has_permission("update", table, record_id=value): return (value, current.auth.messages.access_denied) vars = Storage(addr_street=address, addr_postcode=postcode, parent=parent, ) if lat is not None and lon is not None: vars.lat = lat vars.lon = lon vars.inherited = False elif wkt is not None: vars.wkt = wkt vars.inherited = False # onvalidation # - includes detailed bounds check if deployment_setting doesn't 
disable it form = Storage() form.errors = errors form.vars = vars current.s3db.gis_location_onvalidation(form) if form.errors: errors = form.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (value, error) # Update the record db(table.id == value).update(**vars) # Update location tree in case parent has changed vars.id = value # onaccept current.gis.update_location_tree(vars) return (value, None) else: return (value, self.error_message or current.T("Invalid Location!")) else: # Lx or a specific location with blank Parent/Address/Lat/Lon if value: db = current.db table = db.gis_location query = (table.id == value) & \ (table.deleted == False) location = db(query).select(table.level, table.lat, table.lon, table.addr_street, table.addr_postcode, table.parent, limitby=(0, 1)).first() if not location: return (value, self.error_message or current.T("Invalid Location!")) if location.level: # Do a simple Location check return IS_LOCATION(level=self.levels)(value) else: # Clear the Parent/Lat/Lon/Address vars = Storage(lat = None, lon = None, addr_street = None, addr_postcode = None, parent = None) db(table.id == value).update(**vars) # Update location tree in case parent has changed vars.id = value # onaccept current.gis.update_location_tree(vars) else: # Do a simple Location check return IS_LOCATION(level=self.levels)(value) # ============================================================================= class IS_SITE_SELECTOR(IS_LOCATION_SELECTOR): """ Extends the IS_LOCATION_SELECTOR() validator to transparently support Sites of the specified type. Note that these cannot include any other mandatory fields other than Name & location_id Designed for use within the ???S3LocationSelectorWidget. 
For Create forms, this will create a new site & location from the additional fields For Update forms, this will normally just check that we have a valid site_id FK - although there is the option to create a new location there too, in which case it acts as-above. @ToDo: Audit """ def __init__(self, site_type = "project_site", error_message = None, ): self.error_message = error_message self.errors = Storage() self.id = None self.site_type = site_type # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) db = current.db auth = current.auth gis = current.gis table = db.gis_location stable = db[self.site_type] if value == "dummy": # Create form if not auth.s3_has_permission("create", stable): return (None, auth.messages.access_denied) location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (None, error) if location.name or location.lat or location.lon or \ location.street or location.postcode or location.parent: # Location creation vars = dict(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = form.vars.wkt, lon_min = form.vars.lon_min, lon_max = form.vars.lon_max, lat_min = form.vars.lat_min, lat_max = form.vars.lat_max ) location_id = table.insert(**vars) # Location onaccept vars["id"] = location_id gis.update_location_tree(vars) # Site creation value = stable.insert(name = location.name, location_id = location_id) return (value, None) else: return (None, None) else: # This must be an Update form if not auth.s3_has_permission("update", stable, record_id=value): return (value, auth.messages.access_denied) # Check that this is a valid 
site_id query = (stable.id == value) & \ (stable.deleted == False) site = db(query).select(stable.id, stable.name, stable.location_id, limitby=(0, 1)).first() location_id = site.location_id if site else None if location_id: # Update the location, in case changes have been made self.id = value location = self._process_values() if self.errors: errors = self.errors error = "" for e in errors: error = "%s\n%s" % (error, errors[e]) if error else errors[e] return (value, error) # Location update name = location.name vars = dict(name = name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent ) lquery = (table.id == location_id) db(lquery).update(**vars) # Location onaccept vars["id"] = location_id gis.update_location_tree(vars) if stable.name != name: # Site Name has changed db(query).update(name = name) return (value, None) return (value, self.error_message or current.T("Invalid Site!")) # ============================================================================= class IS_ADD_PERSON_WIDGET(Validator): """ Validator for S3AddPersonWidget """ def __init__(self, error_message=None): self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) person_id = None if value: try: person_id = int(value) except: pass request = current.request if request.env.request_method == "POST": if "import" in request.args: # Widget Validator not appropriate for this context return (person_id, None) T = current.T db = current.db s3db = current.s3db ptable = db.pr_person ctable = db.pr_contact def email_validate(value, person_id): """ Validate the email address """ error_message = T("Please enter a valid email address") if value is not None: value = value.strip() # No 
email? if not value: email_required = \ current.deployment_settings.get_hrm_email_required() if email_required: return (value, error_message) return (value, None) # Valid email? value, error = IS_EMAIL()(value) if error: return value, error_message # Unique email? query = (ctable.deleted != True) & \ (ctable.contact_method == "EMAIL") & \ (ctable.value == value) if person_id: query &= (ctable.pe_id == ptable.pe_id) & \ (ptable.id != person_id) email = db(query).select(ctable.id, limitby=(0, 1)).first() if email: error_message = T("This email-address is already registered.") return value, error_message # Ok! return value, None _vars = request.post_vars mobile = _vars["mobile_phone"] if mobile: # Validate the phone number regex = re.compile(single_phone_number_pattern) if not regex.match(mobile): error = T("Invalid phone number") return (person_id, error) validate = current.manager.validate if person_id: # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate and update the person record query = (ptable.id == person_id) data = Storage() for f in ptable._filter_fields(_vars): value, error = validate(ptable, None, f, _vars[f]) if error: return (person_id, error) if value: if f == "date_of_birth": data[f] = value.isoformat() else: data[f] = value if data: db(query).update(**data) # Update the contact information & details record = db(query).select(ptable.pe_id, limitby=(0, 1)).first() if record: pe_id = record.pe_id r = ctable(pe_id=pe_id, contact_method="EMAIL") email = _vars["email"] if email: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "EMAIL") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if email != r.value: db(query).update(value=email) else: # insert ctable.insert(pe_id=pe_id, contact_method="EMAIL", value=email) if mobile: query = (ctable.pe_id == 
pe_id) & \ (ctable.contact_method == "SMS") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if mobile != r.value: db(query).update(value=mobile) else: # insert ctable.insert(pe_id=pe_id, contact_method="SMS", value=mobile) occupation = _vars["occupation"] if occupation: pdtable = s3db.pr_person_details query = (pdtable.person_id == person_id) & \ (pdtable.deleted != True) r = db(query).select(pdtable.occupation, limitby=(0, 1)).first() if r: # update if occupation != r.occupation: db(query).update(occupation=occupation) else: # insert pdtable.insert(person_id=person_id, occupation=occupation) else: # Create a new person record # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate the email email, error = email_validate(_vars.email, None) if error: return (None, error) # Validate and add the person record for f in ptable._filter_fields(_vars): value, error = validate(ptable, None, f, _vars[f]) if error: return (None, error) elif f == "date_of_birth" and \ value: _vars[f] = value.isoformat() person_id = ptable.insert(**ptable._filter_fields(_vars)) # Need to update post_vars here, # for some reason this doesn't happen through validation alone request.post_vars.update(person_id=str(person_id)) if person_id: # Update the super-entities s3db.update_super(ptable, dict(id=person_id)) # Read the created pe_id query = (ptable.id == person_id) person = db(query).select(ptable.pe_id, limitby=(0, 1)).first() # Add contact information as provided if _vars.email: ctable.insert(pe_id=person.pe_id, contact_method="EMAIL", value=_vars.email) if mobile: ctable.insert(pe_id=person.pe_id, contact_method="SMS", value=_vars.mobile_phone) if _vars.occupation: s3db.pr_person_details.insert(person_id = person_id, occupation = _vars.occupation) else: # Something went wrong return (None, 
self.error_message or \ T("Could not add person record")) return (person_id, None) # ============================================================================= class IS_ADD_PERSON_WIDGET2(Validator): """ Validator for S3AddPersonWidget2 @ToDo: get working human_resource_id """ def __init__(self, error_message=None): self.error_message = error_message # Tell s3_mark_required that this validator doesn't accept NULL values self.mark_required = True # ------------------------------------------------------------------------- def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) person_id = None if value: try: person_id = int(value) except: pass request = current.request if request.env.request_method == "POST": if "import" in request.args: # Widget Validator not appropriate for this context return (person_id, None) T = current.T db = current.db s3db = current.s3db ptable = db.pr_person ctable = s3db.pr_contact def name_split(name): """ Split a full name into First Middle Last NB This *will* cause issues as people often have multi-word firstnames and surnames http://stackoverflow.com/questions/259634/splitting-a-persons-name-into-forename-and-surname http://stackoverflow.com/questions/159567/how-can-i-parse-the-first-middle-and-last-name-from-a-full-name-field-in-sql """ #names = name.split(" ") # Remove prefixes & suffixes #bad = ("mr", "mrs", "ms", "dr", "eng", # "jr", "sr", "esq", "junior", "senior", # "ii", "iii", "iv", "v", # "2nd", "3rd", "4th", "5th", # ) #names = filter(lambda x: x.lower() not in bad, names) # Assume First Name is a single word #first_name = names[0] # Assume Last Name is a single word! 
#if len(names) > 1: # last_name = names[-1] #else: # last_name = None # Assume all other names go into the Middle Name #if len(names) > 2: # middle_name = " ".join(names[1:-1]) #else: # middle_name = None #return first_name, middle_name, last_name # https://code.google.com/p/python-nameparser/ from nameparser import HumanName name = HumanName(name) return name.first, name.middle, name.last def email_validate(value, person_id): """ Validate the email address """ error_message = T("Please enter a valid email address") if value is not None: value = value.strip() # No email? if not value: email_required = \ current.deployment_settings.get_hrm_email_required() if email_required: return (value, error_message) return (value, None) # Valid email? value, error = IS_EMAIL()(value) if error: return value, error_message # Unique email? query = (ctable.deleted != True) & \ (ctable.contact_method == "EMAIL") & \ (ctable.value == value) if person_id: query &= (ctable.pe_id == ptable.pe_id) & \ (ptable.id != person_id) email = db(query).select(ctable.id, limitby=(0, 1)).first() if email: error_message = T("This email-address is already registered.") return value, error_message # Ok! 
return value, None _vars = request.post_vars mobile = _vars["mobile_phone"] if mobile: # Validate the phone number regex = re.compile(single_phone_number_pattern) if not regex.match(mobile): error = T("Invalid phone number") return (person_id, error) home_phone = _vars.get("home_phone", None) if home_phone: # Validate the phone number regex = re.compile(single_phone_number_pattern) if not regex.match(home_phone): error = T("Invalid phone number") return (person_id, error) validate = current.manager.validate if person_id: # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Separate the Name into components first_name, middle_name, last_name = name_split(_vars["full_name"]) _vars["first_name"] = first_name _vars["middle_name"] = middle_name _vars["last_name"] = last_name # Validate and update the person record query = (ptable.id == person_id) data = Storage() for f in ptable._filter_fields(_vars): value, error = validate(ptable, None, f, _vars[f]) if error: return (person_id, error) if value: if f == "date_of_birth": data[f] = value.isoformat() else: data[f] = value if data: db(query).update(**data) # Update the contact information & details record = db(query).select(ptable.pe_id, limitby=(0, 1)).first() if record: pe_id = record.pe_id r = ctable(pe_id=pe_id, contact_method="EMAIL") email = _vars["email"] if email: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "EMAIL") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if email != r.value: db(query).update(value=email) else: # insert ctable.insert(pe_id=pe_id, contact_method="EMAIL", value=email) if mobile: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "SMS") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if mobile != r.value: 
db(query).update(value=mobile) else: # insert ctable.insert(pe_id=pe_id, contact_method="SMS", value=mobile) if home_phone: query = (ctable.pe_id == pe_id) & \ (ctable.contact_method == "HOME_PHONE") &\ (ctable.deleted != True) r = db(query).select(ctable.value, limitby=(0, 1)).first() if r: # update if home_phone != r.value: db(query).update(value=home_phone) else: # insert ctable.insert(pe_id=pe_id, contact_method="HOME_PHONE", value=home_phone) occupation = _vars.get("occupation", None) if occupation: pdtable = s3db.pr_person_details query = (pdtable.person_id == person_id) & \ (pdtable.deleted != True) r = db(query).select(pdtable.occupation, limitby=(0, 1)).first() if r: # update if occupation != r.occupation: db(query).update(occupation=occupation) else: # insert pdtable.insert(person_id=person_id, occupation=occupation) else: # Create a new person record # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate the email email, error = email_validate(_vars.email, None) if error: return (None, error) # Separate the Name into components first_name, middle_name, last_name = name_split(_vars["full_name"]) _vars["first_name"] = first_name _vars["middle_name"] = middle_name _vars["last_name"] = last_name # Validate and add the person record for f in ptable._filter_fields(_vars): value, error = validate(ptable, None, f, _vars[f]) if error: return (None, None) elif f == "date_of_birth" and \ value: _vars[f] = value.isoformat() person_id = ptable.insert(**ptable._filter_fields(_vars)) # Need to update post_vars here, # for some reason this doesn't happen through validation alone request.post_vars.update(person_id=str(person_id)) if person_id: # Update the super-entities s3db.update_super(ptable, dict(id=person_id)) # Read the created pe_id query = (ptable.id == person_id) person = db(query).select(ptable.pe_id, limitby=(0, 
1)).first() # Add contact information as provided if _vars.email: ctable.insert(pe_id=person.pe_id, contact_method="EMAIL", value=_vars.email) if mobile: ctable.insert(pe_id=person.pe_id, contact_method="SMS", value=_vars.mobile_phone) if home_phone: ctable.insert(pe_id=person.pe_id, contact_method="HOME_PHONE", value=_vars.home_phone) if _vars.occupation: s3db.pr_person_details.insert(person_id = person_id, occupation = _vars.occupation) else: # Something went wrong return (person_id, self.error_message or \ T("Could not add person record")) return (person_id, None) # ============================================================================= class IS_PROCESSED_IMAGE(Validator): """ Uses an S3ImageCropWidget to allow the user to crop/scale images and processes the results sent by the browser. @param file_cb: callback that returns the file for this field @param error_message: the error message to be returned @param image_bounds: the boundaries for the processed image @param upload_path: upload path for the image """ def __init__(self, field_name, file_cb, error_message="No image was specified!", image_bounds=(300, 300), upload_path=None, ): self.field_name = field_name self.file_cb = file_cb self.error_message = error_message self.image_bounds = image_bounds self.upload_path = upload_path def __call__(self, value): if current.response.s3.bulk: # Pointless in imports return (value, None) r = current.request vars = r.post_vars if r.env.request_method == "GET": return (value, None) # If there's a newly uploaded file, accept it. It'll be processed in # the update form. # NOTE: A FieldStorage with data evaluates as False (odd!) file = vars.get(self.field_name) if file not in ("", None): return (file, None) encoded_file = vars.get("imagecrop-data") file = self.file_cb() if not (encoded_file or file): return value, current.T(self.error_message) # Decode the base64-encoded image from the client side image crop # process if, that worked. 
if encoded_file: import base64 import uuid try: from cStringIO import StringIO except ImportError: from StringIO import StringIO metadata, encoded_file = encoded_file.split(",") filename, datatype, enctype = metadata.split(";") f = Storage() f.filename = uuid.uuid4().hex + filename f.file = StringIO(base64.decodestring(encoded_file)) return (f, None) # Crop the image, if we've got the crop points. points = vars.get("imagecrop-points") if points and file: import os points = map(float, points.split(",")) if not self.upload_path: path = os.path.join(r.folder, "uploads", "images", file) else: path = os.path.join(self.upload_path, file) current.s3task.async("crop_image", args=[path] + points + [self.image_bounds[0]]) return (None, None) # ============================================================================= class IS_UTC_OFFSET(Validator): """ Validates a given string value as UTC offset in the format +/-HHMM @param error_message: the error message to be returned @note: all leading parts of the string (before the trailing offset specification) will be ignored and replaced by 'UTC ' in the return value, if the string passes through. """ def __init__(self, error_message="invalid UTC offset!" ): self.error_message = error_message # ------------------------------------------------------------------------- def __call__(self, value): if value and isinstance(value, str): _offset_str = value.strip() offset = S3DateTime.get_offset_value(_offset_str) if offset is not None and offset > -86340 and offset < 86340: # Add a leading 'UTC ', # otherwise leading '+' and '0' will be stripped away by web2py return ("UTC " + _offset_str[-5:], None) return (value, self.error_message) # ============================================================================= class IS_UTC_DATETIME(Validator): """ Validates a given value as datetime string and returns the corresponding UTC datetime. 
Example: - INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME()) @param format: strptime/strftime format template string, for directives refer to your strptime implementation @param error_message: error message to be returned @param utc_offset: offset to UTC in seconds, if not specified, the value is considered to be UTC @param minimum: the minimum acceptable datetime @param maximum: the maximum acceptable datetime @note: datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss, with an optional trailing UTC offset specified as +/-HHMM (+ for eastern, - for western timezones) """ def __init__(self, format=None, error_message=None, utc_offset=None, minimum=None, maximum=None): if format is None: self.format = format = str(current.deployment_settings.get_L10n_datetime_format()) else: self.format = format = str(format) self.utc_offset = utc_offset self.minimum = minimum self.maximum = maximum delta = timedelta(seconds=self.delta()) min_local = minimum and minimum + delta or None max_local = maximum and maximum + delta or None if error_message is None: if minimum is None and maximum is None: error_message = current.T("enter date and time") elif minimum is None: error_message = current.T("enter date and time on or before %(max)s") elif maximum is None: error_message = current.T("enter date and time on or after %(min)s") else: error_message = current.T("enter date and time in range %(min)s %(max)s") if min_local: min = min_local.strftime(format) else: min = "" if max_local: max = max_local.strftime(format) else: max = "" self.error_message = error_message % dict(min = min, max = max) # ------------------------------------------------------------------------- def delta(self, utc_offset=None): if utc_offset is not None: self.utc_offset = utc_offset if self.utc_offset is None: self.utc_offset = current.session.s3.utc_offset validate = IS_UTC_OFFSET() offset, error = validate(self.utc_offset) if error: self.utc_offset = "UTC +0000" # fallback to UTC else: 
self.utc_offset = offset delta = S3DateTime.get_offset_value(self.utc_offset) return delta # ------------------------------------------------------------------------- def __call__(self, value): val = value.strip() # Get UTC offset if len(val) > 5 and val[-5] in ("+", "-") and val[-4:].isdigit(): # UTC offset specified in dtstr dtstr = val[0:-5].strip() utc_offset = "UTC %s" % val[-5:] else: # use default UTC offset dtstr = val utc_offset = self.utc_offset # Offset must be in range -2359 to +2359 offset = self.delta(utc_offset=utc_offset) if offset < -86340 or offset > 86340: return (val, self.error_message) # Convert into datetime object try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr, self.format) dt = datetime(y, m, d, hh, mm, ss) except: try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr + ":00", self.format) dt = datetime(y, m, d, hh, mm, ss) except: return(value, self.error_message) # Validate dt_utc = dt - timedelta(seconds=offset) if self.minimum and dt_utc < self.minimum or \ self.maximum and dt_utc > self.maximum: return (dt_utc, self.error_message) else: return (dt_utc, None) # ------------------------------------------------------------------------- def formatter(self, value): format = self.format offset = self.delta() if not value: return "-" elif offset: dt = value + timedelta(seconds=offset) return dt.strftime(format) else: dt = value return dt.strftime(format) + "+0000" # ============================================================================= class IS_ACL(IS_IN_SET): """ Validator for ACLs @attention: Incomplete! Does not validate yet, but just convert. 
""" def __call__(self, value): """ Validation @param value: the value to validate """ if not isinstance(value, (list, tuple)): value = [value] acl = 0x0000 for v in value: try: flag = int(v) except (ValueError, TypeError): flag = 0x0000 else: acl |= flag return (acl, None) # ============================================================================= class IS_COMBO_BOX(Validator): """ Designed for use with an Autocomplete. - catches any new entries & creates the appropriate record @ToDo: Audit """ def __init__(self, tablename, requires, # The normal validator error_message = None, ): self.tablename = tablename self.requires = requires self.error_message = error_message # ------------------------------------------------------------------------- def __call__(self, value): if not value: # Do the normal validation return self.requires(value) elif isinstance(value, int): # If this is an ID then this is an update form # @ToDo: Can we assume that? # Do the normal validation return self.requires(value) else: # Name => create form tablename = self.tablename db = current.db table = db[tablename] # Test for duplicates query = (table.name == value) r = db(query).select(table.id, limitby=(0, 1)).first() if r: # Use Existing record value = r.id return (value, None) if not current.auth.s3_has_permission("create", table): return (None, current.auth.messages.access_denied) value = table.insert(name=value) # onaccept onaccept = current.s3db.get_config(tablename, "onaccept") if onaccept: onaccept(form=Storage(vars=Storage(id=value))) return (value, None) # ============================================================================= class QUANTITY_INV_ITEM(object): """ For Inventory module """ def __init__(self, db, inv_item_id, item_pack_id ): self.inv_item_id = inv_item_id self.item_pack_id = item_pack_id current.db = db # ------------------------------------------------------------------------- def __call__(self, value): db = current.db args = current.request.args track_quantity 
= 0 if args[1] == "track_item" and len(args) > 2: # look to see if we already have a quantity stored in the track item id = args[2] track_record = current.s3db.inv_track_item[id] track_quantity = track_record.quantity if track_quantity >= float(value): # value reduced or unchanged return (value, None) error = "Invalid Quantity" # @todo: better error catching query = (db.inv_inv_item.id == self.inv_item_id) & \ (db.inv_inv_item.item_pack_id == db.supply_item_pack.id) inv_item_record = db(query).select(db.inv_inv_item.quantity, db.supply_item_pack.quantity, db.supply_item_pack.name, limitby = (0, 1)).first() # @todo: this should be a virtual field if inv_item_record and value: query = (db.supply_item_pack.id == self.item_pack_id) send_record = db(query).select(db.supply_item_pack.quantity, limitby=(0, 1)).first() send_quantity = (float(value) - track_quantity) * send_record.quantity inv_quantity = inv_item_record.inv_inv_item.quantity * \ inv_item_record.supply_item_pack.quantity if send_quantity > inv_quantity: return (value, "Only %s %s (%s) in the Warehouse Stock." % (inv_quantity, inv_item_record.supply_item_pack.name, inv_item_record.supply_item_pack.quantity) ) else: return (value, None) else: return (value, error) # ------------------------------------------------------------------------- def formatter(self, value): return value # ============================================================================= class IS_IN_SET_LAZY(Validator): """ Like IS_IN_SET but with options obtained from a supplied function. Options are instantiated when the validator or its options() method is called, so don't need to be generated until it's used. Useful if the field is not needed on every request, and does significant processing to construct its options, or generates a large collection. If the options are just from a database query, one can use IS_ONE_OF instead. 
Raises an exception if an options collection is passed rather than a callable as this is a programming error, e.g. accidentally *calling* the options function in the constructor instead of passing the function. That would not get lazy options instantiation. The options collection (theset) and labels collection parameters to IS_IN_SET are replaced by: @param theset_fn: Function of no arguments that returns a collection of options and (optionally) labels. Both options and labels can be supplied via a dict or OrderedDict (options are keys, values are labels), list (or tuple) of two-element lists (or tuples) (element 0 in each pair is an option, element 1 is it's label). Otherwise, labels are obtained either by calling the supplied represent function on each item produced by theset_fn, or (if no represent is supplied), the items themselves are used as labels. @param represent: Function of one argument that returns the label for a given option. If there is a function call that returns the collection, just put "lambda:" in front of the call. E.g.: Field("nationality", requires = IS_NULL_OR(IS_IN_SET_LAZY( lambda: gis.get_countries(key_type="code"))), label = T("Nationality"), represent = lambda code: gis.get_country(code, key_type="code") or UNKNOWN_OPT) Keyword parameters are same as for IS_IN_SET, except for labels, which is not replaced by a function that parallels theset_fn, since ordering is problematic if theset_fn returns a dict. 
""" def __init__( self, theset_fn, represent=None, error_message="value not allowed", multiple=False, zero="", sort=False, ): self.multiple = multiple if not callable(theset_fn): raise TypeError("Argument must be a callable.") self.theset_fn = theset_fn self.theset = None self.labels = None self.error_message = error_message self.zero = zero self.sort = sort # ------------------------------------------------------------------------- def _make_theset(self): theset = self.theset_fn() if theset: if isinstance(theset, dict): self.theset = [str(item) for item in theset] self.labels = theset.values() elif isinstance(theset, (tuple,list)): # @ToDo: Can this be a Rows? if isinstance(theset[0], (tuple,list)) and len(theset[0])==2: self.theset = [str(item) for item,label in theset] self.labels = [str(label) for item,label in theset] else: self.theset = [str(item) for item in theset] if represent: self.labels = [represent(item) for item in theset] else: self.theset = theset else: self.theset = [] # ------------------------------------------------------------------------- def options(self, zero=True): if not self.theset: self._make_theset() if not self.labels: items = [(k, k) for (i, k) in enumerate(self.theset)] else: items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] if self.sort: items.sort(options_sorter) if zero and not self.zero is None and not self.multiple: items.insert(0, ("", self.zero)) return items # ------------------------------------------------------------------------- def __call__(self, value): if not self.theset: self._make_theset() if self.multiple: ### if below was values = re.compile("[\w\-:]+").findall(str(value)) if isinstance(value, (str,unicode)): values = [value] elif isinstance(value, (tuple, list)): values = value elif not value: values = [] else: values = [value] failures = [x for x in values if not x in self.theset] if failures and self.theset: if self.multiple and (value == None or value == ""): return ([], None) return (value, 
self.error_message) if self.multiple: if isinstance(self.multiple,(tuple,list)) and \ not self.multiple[0]<=len(values)<self.multiple[1]: return (values, self.error_message) return (values, None) return (value, None) # ============================================================================= class IS_TIME_INTERVAL_WIDGET(Validator): """ Simple validator for the S3TimeIntervalWidget, returns the selected time interval in seconds """ def __init__(self, field): self.field = field # ------------------------------------------------------------------------- def __call__(self, value): try: val = int(value) except ValueError: return (0, None) request = current.request _vars = request.post_vars try: mul = int(_vars[("%s_multiplier" % self.field).replace(".", "_")]) except ValueError: return (0, None) seconds = val * mul return (seconds, None) # END =========================================================================
mit
Leila20/django
tests/field_deconstruction/tests.py
21
18360
from __future__ import unicode_literals from django.apps import apps from django.db import models from django.test import SimpleTestCase, override_settings from django.test.utils import isolate_lru_cache from django.utils import six class FieldDeconstructionTests(SimpleTestCase): """ Tests the deconstruct() method on all core fields. """ def test_name(self): """ Tests the outputting of the correct name if assigned one. """ # First try using a "normal" field field = models.CharField(max_length=65) name, path, args, kwargs = field.deconstruct() self.assertIsNone(name) field.set_attributes_from_name("is_awesome_test") name, path, args, kwargs = field.deconstruct() self.assertEqual(name, "is_awesome_test") self.assertIsInstance(name, six.text_type) # Now try with a ForeignKey field = models.ForeignKey("some_fake.ModelName", models.CASCADE) name, path, args, kwargs = field.deconstruct() self.assertIsNone(name) field.set_attributes_from_name("author") name, path, args, kwargs = field.deconstruct() self.assertEqual(name, "author") def test_auto_field(self): field = models.AutoField(primary_key=True) field.set_attributes_from_name("id") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.AutoField") self.assertEqual(args, []) self.assertEqual(kwargs, {"primary_key": True}) def test_big_integer_field(self): field = models.BigIntegerField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.BigIntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_boolean_field(self): field = models.BooleanField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.BooleanField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.BooleanField(default=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.BooleanField") self.assertEqual(args, []) self.assertEqual(kwargs, {"default": True}) def 
test_char_field(self): field = models.CharField(max_length=65) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.CharField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 65}) field = models.CharField(max_length=65, null=True, blank=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.CharField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True}) def test_char_field_choices(self): field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two"))) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.CharField") self.assertEqual(args, []) self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1}) def test_csi_field(self): field = models.CommaSeparatedIntegerField(max_length=100) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 100}) def test_date_field(self): field = models.DateField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DateField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.DateField(auto_now=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DateField") self.assertEqual(args, []) self.assertEqual(kwargs, {"auto_now": True}) def test_datetime_field(self): field = models.DateTimeField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DateTimeField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.DateTimeField(auto_now_add=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DateTimeField") self.assertEqual(args, []) self.assertEqual(kwargs, {"auto_now_add": True}) # Bug #21785 field = 
models.DateTimeField(auto_now=True, auto_now_add=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DateTimeField") self.assertEqual(args, []) self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True}) def test_decimal_field(self): field = models.DecimalField(max_digits=5, decimal_places=2) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DecimalField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2}) def test_decimal_field_0_decimal_places(self): """ A DecimalField with decimal_places=0 should work (#22272). """ field = models.DecimalField(max_digits=5, decimal_places=0) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.DecimalField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0}) def test_email_field(self): field = models.EmailField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.EmailField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 254}) field = models.EmailField(max_length=255) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.EmailField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 255}) def test_file_field(self): field = models.FileField(upload_to="foo/bar") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.FileField") self.assertEqual(args, []) self.assertEqual(kwargs, {"upload_to": "foo/bar"}) # Test max_length field = models.FileField(upload_to="foo/bar", max_length=200) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.FileField") self.assertEqual(args, []) self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200}) def test_file_path_field(self): field = models.FilePathField(match=r".*\.txt$") name, path, args, kwargs = 
field.deconstruct() self.assertEqual(path, "django.db.models.FilePathField") self.assertEqual(args, []) self.assertEqual(kwargs, {"match": r".*\.txt$"}) field = models.FilePathField(recursive=True, allow_folders=True, max_length=123) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.FilePathField") self.assertEqual(args, []) self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123}) def test_float_field(self): field = models.FloatField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.FloatField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_foreign_key(self): # Test basic pointing from django.contrib.auth.models import Permission field = models.ForeignKey("auth.Permission", models.CASCADE) field.remote_field.model = Permission field.remote_field.field_name = "id" name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE}) self.assertFalse(hasattr(kwargs['to'], "setting_name")) # Test swap detection for swappable model field = models.ForeignKey("auth.User", models.CASCADE) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE}) self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL") # Test nonexistent (for now) model field = models.ForeignKey("something.Else", models.CASCADE) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE}) # Test on_delete field = models.ForeignKey("auth.User", models.SET_NULL) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 
"django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL}) # Test to_field preservation field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE}) # Test related_name preservation field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE}) @override_settings(AUTH_USER_MODEL="auth.Permission") def test_foreign_key_swapped(self): with isolate_lru_cache(apps.get_swappable_settings_name): # It doesn't matter that we swapped out user for permission; # there's no validation. We just want to check the setting stuff works. 
field = models.ForeignKey("auth.Permission", models.CASCADE) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ForeignKey") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE}) self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL") def test_image_field(self): field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ImageField") self.assertEqual(args, []) self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"}) def test_integer_field(self): field = models.IntegerField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.IntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_ip_address_field(self): field = models.IPAddressField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.IPAddressField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_generic_ip_address_field(self): field = models.GenericIPAddressField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.GenericIPAddressField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.GenericIPAddressField(protocol="IPv6") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.GenericIPAddressField") self.assertEqual(args, []) self.assertEqual(kwargs, {"protocol": "IPv6"}) def test_many_to_many_field(self): # Test normal field = models.ManyToManyField("auth.Permission") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission"}) self.assertFalse(hasattr(kwargs['to'], "setting_name")) # Test swappable field = 
models.ManyToManyField("auth.User") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.User"}) self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL") # Test through field = models.ManyToManyField("auth.Permission", through="auth.Group") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"}) # Test custom db_table field = models.ManyToManyField("auth.Permission", db_table="custom_table") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"}) # Test related_name field = models.ManyToManyField("auth.Permission", related_name="custom_table") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"}) @override_settings(AUTH_USER_MODEL="auth.Permission") def test_many_to_many_field_swapped(self): with isolate_lru_cache(apps.get_swappable_settings_name): # It doesn't matter that we swapped out user for permission; # there's no validation. We just want to check the setting stuff works. 
field = models.ManyToManyField("auth.Permission") name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.ManyToManyField") self.assertEqual(args, []) self.assertEqual(kwargs, {"to": "auth.Permission"}) self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL") def test_null_boolean_field(self): field = models.NullBooleanField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.NullBooleanField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_positive_integer_field(self): field = models.PositiveIntegerField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.PositiveIntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_positive_small_integer_field(self): field = models.PositiveSmallIntegerField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.PositiveSmallIntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_slug_field(self): field = models.SlugField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.SlugField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.SlugField(db_index=False, max_length=231) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.SlugField") self.assertEqual(args, []) self.assertEqual(kwargs, {"db_index": False, "max_length": 231}) def test_small_integer_field(self): field = models.SmallIntegerField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.SmallIntegerField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_text_field(self): field = models.TextField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.TextField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_time_field(self): field = models.TimeField() name, 
path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.TimeField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.TimeField(auto_now=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(args, []) self.assertEqual(kwargs, {'auto_now': True}) field = models.TimeField(auto_now_add=True) name, path, args, kwargs = field.deconstruct() self.assertEqual(args, []) self.assertEqual(kwargs, {'auto_now_add': True}) def test_url_field(self): field = models.URLField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.URLField") self.assertEqual(args, []) self.assertEqual(kwargs, {}) field = models.URLField(max_length=231) name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.URLField") self.assertEqual(args, []) self.assertEqual(kwargs, {"max_length": 231}) def test_binary_field(self): field = models.BinaryField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, "django.db.models.BinaryField") self.assertEqual(args, []) self.assertEqual(kwargs, {})
bsd-3-clause
microcom/hr
hr_experience_analytic/hr_experience_analytic.py
21
1383
############################################################################### # # OpenERP, Open Source Management Solution # Copyright (C) 2013 Savoir-faire Linux (<http://www.savoirfairelinux.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### from osv import orm, fields class hr_professional(orm.Model): _inherit = 'hr.experience' _columns = { 'analytic_id': fields.many2one('account.analytic.account', 'Project', domain=[('type', '!=', 'view')], help=""" Enter the project, contract or analytic account"""), }
agpl-3.0
ollie314/kuma
kuma/search/tests/test_types.py
3
1840
from elasticsearch_dsl import query from kuma.core.tests import eq_, ok_ from kuma.wiki.models import Document from kuma.wiki.search import WikiDocumentType from . import ElasticTestCase class WikiDocumentTypeTests(ElasticTestCase): fixtures = ElasticTestCase.fixtures + ['wiki/documents.json'] def test_get_excerpt_strips_html(self): self.refresh() results = WikiDocumentType.search().query('match', content='audio') ok_(results.count() > 0) for doc in results.execute(): excerpt = doc.get_excerpt() ok_('audio' in excerpt) ok_('<strong>' not in excerpt) def test_current_locale_results(self): self.refresh() results = (WikiDocumentType.search() .query(query.Match(title='article') | query.Match(content='article')) .filter('term', locale='en-US')) for doc in results.execute(): eq_('en-US', doc.locale) def test_get_excerpt_uses_summary(self): self.refresh() results = WikiDocumentType.search().query('match', content='audio') ok_(results.count() > 0) for doc in results.execute(): excerpt = doc.get_excerpt() ok_('the word for tough things' in excerpt) ok_('extra content' not in excerpt) def test_hidden_slugs_get_indexable(self): self.refresh() title_list = WikiDocumentType.get_indexable().values_list('title', flat=True) ok_('User:jezdez' not in title_list) def test_hidden_slugs_should_update(self): jezdez_doc = Document.objects.get(slug='User:jezdez') eq_(WikiDocumentType.should_update(jezdez_doc), False)
mpl-2.0
splav/servo
tests/wpt/web-platform-tests/webdriver/tests/new_window/new_window.py
18
1429
from tests.support.asserts import assert_success from . import opener, window_name def new_window(session, type_hint=None): return session.transport.send( "POST", "session/{session_id}/window/new".format(**vars(session)), {"type": type_hint}) def test_type_with_window(session): original_handles = session.handles response = new_window(session, type_hint="window") value = assert_success(response) handles = session.handles assert len(handles) == len(original_handles) + 1 assert value["handle"] in handles assert value["handle"] not in original_handles assert value["type"] == "window" def test_new_window_opens_about_blank(session): response = new_window(session, type_hint="window") value = assert_success(response) assert value["type"] == "window" session.window_handle = value["handle"] assert session.url == "about:blank" def test_new_window_sets_no_window_name(session): response = new_window(session, type_hint="window") value = assert_success(response) assert value["type"] == "window" session.window_handle = value["handle"] assert window_name(session) == "" def test_new_window_sets_no_opener(session): response = new_window(session, type_hint="window") value = assert_success(response) assert value["type"] == "window" session.window_handle = value["handle"] assert opener(session) is None
mpl-2.0
Intel-Corporation/tensorflow
tensorflow/contrib/distributions/python/ops/bijectors/softsign.py
35
3019
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Softsign bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Softsign", ] class Softsign(bijector.Bijector): """Bijector which computes `Y = g(X) = X / (1 + |X|)`. The softsign `Bijector` has the following two useful properties: * The domain is all real numbers * `softsign(x) approx sgn(x)`, for large `|x|`. #### Examples ```python # Create the Y = softsign(X) transform. softsign = Softsign() x = [[[1., 2], [3, 4]], [[5, 6], [7, 8]]] x / (1 + abs(x)) == softsign.forward(x) x / (1 - abs(x)) == softsign.inverse(x) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). 
You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, validate_args=False, name="softsign"): super(Softsign, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) def _forward(self, x): return x / (1. + math_ops.abs(x)) def _inverse(self, y): y = self._maybe_assert_valid_y(y) return y / (1. - math_ops.abs(y)) def _forward_log_det_jacobian(self, x): return -2. * math_ops.log1p(math_ops.abs(x)) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid_y(y) return -2. * math_ops.log1p(-math_ops.abs(y)) def _maybe_assert_valid_y(self, y): if not self.validate_args: return y is_valid = [ check_ops.assert_greater( y, math_ops.cast(-1., dtype=y.dtype.base_dtype), message="Inverse transformation input must be greater than -1."), check_ops.assert_less( y, math_ops.cast(1., dtype=y.dtype.base_dtype), message="Inverse transformation input must be less than 1.") ] return control_flow_ops.with_dependencies(is_valid, y)
apache-2.0
blois/AndroidSDKCloneMin
ndk/prebuilt/linux-x86_64/lib/python2.7/wsgiref/validate.py
114
14731
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php # Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php # Licensed to PSF under a Contributor Agreement """ Middleware to check for obedience to the WSGI specification. Some of the things this checks: * Signature of the application and start_response (including that keyword arguments are not used). * Environment checks: - Environment is a dictionary (and not a subclass). - That all the required keys are in the environment: REQUEST_METHOD, SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors, wsgi.multithread, wsgi.multiprocess, wsgi.run_once - That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the environment (these headers should appear as CONTENT_LENGTH and CONTENT_TYPE). - Warns if QUERY_STRING is missing, as the cgi module acts unpredictably in that case. - That CGI-style variables (that don't contain a .) have (non-unicode) string values - That wsgi.version is a tuple - That wsgi.url_scheme is 'http' or 'https' (@@: is this too restrictive?) - Warns if the REQUEST_METHOD is not known (@@: probably too restrictive). - That SCRIPT_NAME and PATH_INFO are empty or start with / - That at least one of SCRIPT_NAME or PATH_INFO are set. - That CONTENT_LENGTH is a positive integer. - That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should be '/'). - That wsgi.input has the methods read, readline, readlines, and __iter__ - That wsgi.errors has the methods flush, write, writelines * The status is a string, contains a space, starts with an integer, and that integer is in range (> 100). * That the headers is a list (not a subclass, not another kind of sequence). * That the items of the headers are tuples of strings. * That there is no 'status' header (that is used in CGI, but not in WSGI). 
* That the headers don't contain newlines or colons, end in _ or -, or contain characters codes below 037. * That Content-Type is given if there is content (CGI often has a default content type, but WSGI does not). * That no Content-Type is given when there is no content (@@: is this too restrictive?) * That the exc_info argument to start_response is a tuple or None. * That all calls to the writer are with strings, and no other methods on the writer are accessed. * That wsgi.input is used properly: - .read() is called with zero or one argument - That it returns a string - That readline, readlines, and __iter__ return strings - That .close() is not called - No other methods are provided * That wsgi.errors is used properly: - .write() and .writelines() is called with a string - That .close() is not called, and no other methods are provided. * The response iterator: - That it is not a string (it should be a list of a single string; a string will work, but perform horribly). - That .next() returns a string - That the iterator is not iterated over until start_response has been called (that can signal either a server or application error). - That .close() is called (doesn't raise exception, only prints to sys.stderr, because we only know it isn't called when the object is garbage collected). """ __all__ = ['validator'] import re import sys from types import DictType, StringType, TupleType, ListType import warnings header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$') bad_header_value_re = re.compile(r'[\000-\037]') class WSGIWarning(Warning): """ Raised in response to WSGI-spec-related warnings """ def assert_(cond, *args): if not cond: raise AssertionError(*args) def validator(application): """ When applied between a WSGI server and a WSGI application, this middleware will check for WSGI compliancy on a number of levels. 
This middleware does not modify the request or response in any way, but will raise an AssertionError if anything seems off (except for a failure to close the application iterator, which will be printed to stderr -- there's no way to raise an exception at that point). """ def lint_app(*args, **kw): assert_(len(args) == 2, "Two arguments required") assert_(not kw, "No keyword arguments allowed") environ, start_response = args check_environ(environ) # We use this to check if the application returns without # calling start_response: start_response_started = [] def start_response_wrapper(*args, **kw): assert_(len(args) == 2 or len(args) == 3, ( "Invalid number of arguments: %s" % (args,))) assert_(not kw, "No keyword arguments allowed") status = args[0] headers = args[1] if len(args) == 3: exc_info = args[2] else: exc_info = None check_status(status) check_headers(headers) check_content_type(status, headers) check_exc_info(exc_info) start_response_started.append(None) return WriteWrapper(start_response(*args)) environ['wsgi.input'] = InputWrapper(environ['wsgi.input']) environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors']) iterator = application(environ, start_response_wrapper) assert_(iterator is not None and iterator != False, "The application must return an iterator, if only an empty list") check_iterator(iterator) return IteratorWrapper(iterator, start_response_started) return lint_app class InputWrapper: def __init__(self, wsgi_input): self.input = wsgi_input def read(self, *args): assert_(len(args) <= 1) v = self.input.read(*args) assert_(type(v) is type("")) return v def readline(self): v = self.input.readline() assert_(type(v) is type("")) return v def readlines(self, *args): assert_(len(args) <= 1) lines = self.input.readlines(*args) assert_(type(lines) is type([])) for line in lines: assert_(type(line) is type("")) return lines def __iter__(self): while 1: line = self.readline() if not line: return yield line def close(self): assert_(0, "input.close() 
must not be called") class ErrorWrapper: def __init__(self, wsgi_errors): self.errors = wsgi_errors def write(self, s): assert_(type(s) is type("")) self.errors.write(s) def flush(self): self.errors.flush() def writelines(self, seq): for line in seq: self.write(line) def close(self): assert_(0, "errors.close() must not be called") class WriteWrapper: def __init__(self, wsgi_writer): self.writer = wsgi_writer def __call__(self, s): assert_(type(s) is type("")) self.writer(s) class PartialIteratorWrapper: def __init__(self, wsgi_iterator): self.iterator = wsgi_iterator def __iter__(self): # We want to make sure __iter__ is called return IteratorWrapper(self.iterator, None) class IteratorWrapper: def __init__(self, wsgi_iterator, check_start_response): self.original_iterator = wsgi_iterator self.iterator = iter(wsgi_iterator) self.closed = False self.check_start_response = check_start_response def __iter__(self): return self def next(self): assert_(not self.closed, "Iterator read after closed") v = self.iterator.next() if self.check_start_response is not None: assert_(self.check_start_response, "The application returns and we started iterating over its body, but start_response has not yet been called") self.check_start_response = None return v def close(self): self.closed = True if hasattr(self.original_iterator, 'close'): self.original_iterator.close() def __del__(self): if not self.closed: sys.stderr.write( "Iterator garbage collected without being closed") assert_(self.closed, "Iterator garbage collected without being closed") def check_environ(environ): assert_(type(environ) is DictType, "Environment is not of the right type: %r (environment: %r)" % (type(environ), environ)) for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT', 'wsgi.version', 'wsgi.input', 'wsgi.errors', 'wsgi.multithread', 'wsgi.multiprocess', 'wsgi.run_once']: assert_(key in environ, "Environment missing required key: %r" % (key,)) for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']: 
assert_(key not in environ, "Environment should not have the key: %s " "(use %s instead)" % (key, key[5:])) if 'QUERY_STRING' not in environ: warnings.warn( 'QUERY_STRING is not in the WSGI environment; the cgi ' 'module will use sys.argv when this variable is missing, ' 'so application errors are more likely', WSGIWarning) for key in environ.keys(): if '.' in key: # Extension, we don't care about its type continue assert_(type(environ[key]) is StringType, "Environmental variable %s is not a string: %r (value: %r)" % (key, type(environ[key]), environ[key])) assert_(type(environ['wsgi.version']) is TupleType, "wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],)) assert_(environ['wsgi.url_scheme'] in ('http', 'https'), "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme']) check_input(environ['wsgi.input']) check_errors(environ['wsgi.errors']) # @@: these need filling out: if environ['REQUEST_METHOD'] not in ( 'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'): warnings.warn( "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'], WSGIWarning) assert_(not environ.get('SCRIPT_NAME') or environ['SCRIPT_NAME'].startswith('/'), "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME']) assert_(not environ.get('PATH_INFO') or environ['PATH_INFO'].startswith('/'), "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO']) if environ.get('CONTENT_LENGTH'): assert_(int(environ['CONTENT_LENGTH']) >= 0, "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH']) if not environ.get('SCRIPT_NAME'): assert_('PATH_INFO' in environ, "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO " "should at least be '/' if SCRIPT_NAME is empty)") assert_(environ.get('SCRIPT_NAME') != '/', "SCRIPT_NAME cannot be '/'; it should instead be '', and " "PATH_INFO should be '/'") def check_input(wsgi_input): for attr in ['read', 'readline', 'readlines', '__iter__']: assert_(hasattr(wsgi_input, attr), "wsgi.input (%r) doesn't have the attribute %s" % (wsgi_input, 
attr)) def check_errors(wsgi_errors): for attr in ['flush', 'write', 'writelines']: assert_(hasattr(wsgi_errors, attr), "wsgi.errors (%r) doesn't have the attribute %s" % (wsgi_errors, attr)) def check_status(status): assert_(type(status) is StringType, "Status must be a string (not %r)" % status) # Implicitly check that we can turn it into an integer: status_code = status.split(None, 1)[0] assert_(len(status_code) == 3, "Status codes must be three characters: %r" % status_code) status_int = int(status_code) assert_(status_int >= 100, "Status code is invalid: %r" % status_int) if len(status) < 4 or status[3] != ' ': warnings.warn( "The status string (%r) should be a three-digit integer " "followed by a single space and a status explanation" % status, WSGIWarning) def check_headers(headers): assert_(type(headers) is ListType, "Headers (%r) must be of type list: %r" % (headers, type(headers))) header_names = {} for item in headers: assert_(type(item) is TupleType, "Individual headers (%r) must be of type tuple: %r" % (item, type(item))) assert_(len(item) == 2) name, value = item assert_(name.lower() != 'status', "The Status header cannot be used; it conflicts with CGI " "script, and HTTP status is not given through headers " "(value: %r)." 
% value) header_names[name.lower()] = None assert_('\n' not in name and ':' not in name, "Header names may not contain ':' or '\\n': %r" % name) assert_(header_re.search(name), "Bad header name: %r" % name) assert_(not name.endswith('-') and not name.endswith('_'), "Names may not end in '-' or '_': %r" % name) if bad_header_value_re.search(value): assert_(0, "Bad header value: %r (bad char: %r)" % (value, bad_header_value_re.search(value).group(0))) def check_content_type(status, headers): code = int(status.split(None, 1)[0]) # @@: need one more person to verify this interpretation of RFC 2616 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html NO_MESSAGE_BODY = (204, 304) for name, value in headers: if name.lower() == 'content-type': if code not in NO_MESSAGE_BODY: return assert_(0, ("Content-Type header found in a %s response, " "which must not return content.") % code) if code not in NO_MESSAGE_BODY: assert_(0, "No Content-Type header found in headers (%s)" % headers) def check_exc_info(exc_info): assert_(exc_info is None or type(exc_info) is type(()), "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info))) # More exc_info checks? def check_iterator(iterator): # Technically a string is legal, which is why it's a really bad # idea, because it may cause the response to be returned # character-by-character assert_(not isinstance(iterator, str), "You should not return a string as your application iterator, " "instead return a single-item list containing that string.")
apache-2.0
betoesquivel/fil2014
build/django/build/lib.linux-x86_64-2.7/django/contrib/admin/tests.py
104
4914
import os

from django.test import LiveServerTestCase
from django.utils.module_loading import import_by_path
from django.utils.unittest import SkipTest
from django.utils.translation import ugettext as _


class AdminSeleniumWebDriverTestCase(LiveServerTestCase):
    """LiveServerTestCase base class driving the admin through Selenium.

    Skips itself entirely unless the DJANGO_SELENIUM_TESTS environment
    variable is set and the configured webdriver can be instantiated.
    """

    # Minimal set of apps the admin selenium tests need installed.
    available_apps = [
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
    ]
    # Dotted path of the selenium webdriver class; subclasses may override.
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    @classmethod
    def setUpClass(cls):
        if not os.environ.get('DJANGO_SELENIUM_TESTS', False):
            raise SkipTest('Selenium tests not requested')
        try:
            cls.selenium = import_by_path(cls.webdriver_class)()
        except Exception as e:
            raise SkipTest('Selenium webdriver "%s" not installed or not '
                           'operational: %s' % (cls.webdriver_class, str(e)))
        # This has to be last to ensure that resources are cleaned up properly!
        super(AdminSeleniumWebDriverTestCase, cls).setUpClass()

    @classmethod
    def _tearDownClassInternal(cls):
        if hasattr(cls, 'selenium'):
            cls.selenium.quit()
        super(AdminSeleniumWebDriverTestCase, cls)._tearDownClassInternal()

    def wait_until(self, callback, timeout=10):
        """
        Helper function that blocks the execution of the tests until the
        specified callback returns a value that is not falsy. This function can
        be called, for example, after clicking a link or submitting a form.
        See the other public methods that call this function for more details.
        """
        from selenium.webdriver.support.wait import WebDriverWait
        WebDriverWait(self.selenium, timeout).until(callback)

    def wait_loaded_tag(self, tag_name, timeout=10):
        """
        Helper function that blocks until the element with the given tag name
        is found on the page.
        """
        self.wait_until(
            lambda driver: driver.find_element_by_tag_name(tag_name),
            timeout
        )

    def wait_page_loaded(self):
        """
        Block until page has started to load.
        """
        from selenium.common.exceptions import TimeoutException
        try:
            # Wait for the next page to be loaded
            self.wait_loaded_tag('body')
        except TimeoutException:
            # IE7 occasionally returns an error "Internet Explorer cannot
            # display the webpage" and doesn't load the next page. We just
            # ignore it.
            pass

    def admin_login(self, username, password, login_url='/admin/'):
        """
        Helper function to log into the admin.
        """
        self.selenium.get('%s%s' % (self.live_server_url, login_url))
        username_input = self.selenium.find_element_by_name('username')
        username_input.send_keys(username)
        password_input = self.selenium.find_element_by_name('password')
        password_input.send_keys(password)
        login_text = _('Log in')
        self.selenium.find_element_by_xpath(
            '//input[@value="%s"]' % login_text).click()
        self.wait_page_loaded()

    def get_css_value(self, selector, attribute):
        """
        Helper function that returns the value for the CSS attribute of an
        DOM element specified by the given selector. Uses the jQuery that ships
        with Django.
        """
        return self.selenium.execute_script(
            'return django.jQuery("%s").css("%s")' % (selector, attribute))

    def get_select_option(self, selector, value):
        """
        Returns the <OPTION> with the value `value` inside the <SELECT> widget
        identified by the CSS selector `selector`.
        """
        from selenium.common.exceptions import NoSuchElementException
        options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
        for option in options:
            if option.get_attribute('value') == value:
                return option
        raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))

    def assertSelectOptions(self, selector, values):
        """
        Asserts that the <SELECT> widget identified by `selector` has the
        options with the given `values`.
        """
        options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
        actual_values = []
        for option in options:
            actual_values.append(option.get_attribute('value'))
        self.assertEqual(values, actual_values)

    def has_css_class(self, selector, klass):
        """
        Returns True if the element identified by `selector` has the CSS class
        `klass`.
        """
        return (self.selenium.find_element_by_css_selector(selector)
                .get_attribute('class').find(klass) != -1)
mit
myd7349/Ongoing-Study
python/RegKey.py
2
3094
# -*- coding: utf-8 -*-
# 2014-10-29T19:30+08:00
# By myd7349(myd7349@gmail.com)

# TODO: documentation and unit test

import os.path
import winreg


# These 2 functions below are part of the internal implementation,
# and you should not call them directly.
def _OpenKeyToEnum(key, sub_key):
    """Open *sub_key* under *key* for reading.

    Returns *key* itself when *sub_key* is None or empty; in that case the
    returned handle must NOT be closed by the caller (_CloseKeyToEnum knows
    how to tell the two cases apart).
    """
    # PEP 8: compare against None with 'is not', not '!='.
    if sub_key is not None and not isinstance(sub_key, str):
        raise TypeError('argument "sub_key" must be str or None')
    if sub_key:  # non-empty string: open a real sub-key handle
        return winreg.OpenKeyEx(key, sub_key, 0, winreg.KEY_READ)
    else:
        return key


def _CloseKeyToEnum(key, key_to_enum):
    """Close a handle returned by _OpenKeyToEnum, unless it is *key* itself."""
    if key_to_enum and key_to_enum != key:
        winreg.CloseKey(key_to_enum)


_Join = os.path.join


def _EnumSubKeyImpl(key, sub_key=None, recursive=False, first_recursion=True):
    """Generator yielding sub-key names under key/sub_key.

    When *recursive* is true, yields the relative paths of leaf keys;
    otherwise yields the immediate child key names.
    """
    key_to_enum = _OpenKeyToEnum(key, sub_key)
    # try/finally so the registry handle is released even when the consumer
    # abandons the generator early (GeneratorExit) or winreg raises
    # mid-iteration; the original only closed the key on normal completion,
    # leaking the handle in those cases.
    try:
        sub_key_cnt, val_cnt, last_modified = winreg.QueryInfoKey(key_to_enum)
        if recursive:
            if first_recursion:
                for i in range(sub_key_cnt):
                    yield from _EnumSubKeyImpl(key_to_enum,
                                               winreg.EnumKey(key_to_enum, i),
                                               recursive, False)
            else:
                if sub_key_cnt == 0:
                    # Leaf key: yield its path relative to the original key.
                    yield sub_key
                else:
                    for i in range(sub_key_cnt):
                        new_sub_key = _Join(sub_key, winreg.EnumKey(key_to_enum, i))
                        yield from _EnumSubKeyImpl(key, new_sub_key, recursive, False)
        else:
            for i in range(sub_key_cnt):
                yield winreg.EnumKey(key_to_enum, i)
    finally:
        _CloseKeyToEnum(key, key_to_enum)


# The old version of EnumSubKey looks like this:
#EnumSubKey = functools.update_wrapper(
#    functools.partial(_EnumSubKeyImpl, first_recursion = True), _EnumSubKeyImpl)
# in which case 'help(EnumSubKey)' will not work but 'print(EnumSubKey.__doc__)' works.
# So I may need some black-magic:
# http://stackoverflow.com/questions/16672856/allow-help-to-work-on-partial-function-object
# But it is even more confusing. So I rewrite it.
def EnumSubKey(key, sub_key=None, recursive=False):
    '''Enumerate all sub-keys under specified registry item.'''
    return _EnumSubKeyImpl(key, sub_key, recursive, True)


# See issue #1
def EnumValue(key, sub_key=None, recursive=False):
    '''Enumerate all values under specified registry item.

    Yields (parent_key, name, data, data_type) tuples; parent_key is ''
    for values directly under key/sub_key.
    '''
    key_to_enum = _OpenKeyToEnum(key, sub_key)
    # Same handle-leak fix as _EnumSubKeyImpl: close in finally.
    try:
        sub_key_cnt, val_cnt, last_modified = winreg.QueryInfoKey(key_to_enum)
        for i in range(val_cnt):
            yield ('', ) + winreg.EnumValue(key_to_enum, i)
        if recursive:
            for new_sub_key in EnumSubKey(key, sub_key, True):
                for parent_key, name, data, data_type in \
                        EnumValue(key, _Join(sub_key, new_sub_key), True):
                    yield (_Join(parent_key, new_sub_key), name, data, data_type)
    finally:
        _CloseKeyToEnum(key, key_to_enum)


def SearchValue(key, sub_key, value_name, recursive=False):
    """Yield the value tuples from EnumValue whose value name == value_name."""
    return filter(lambda value_tuple: value_tuple[1] == value_name,
                  EnumValue(key, sub_key, recursive))
lgpl-3.0
SphinxKnight/kuma
kuma/core/tests/test_pagination.py
1
1899
from __future__ import unicode_literals import pyquery from django.test import RequestFactory from ..templatetags.jinja_helpers import paginator from ..urlresolvers import reverse from ..utils import paginate, urlparams def test_paginated_url(): """Avoid duplicating page param in pagination.""" url = urlparams(reverse('search'), q='bookmarks', page=2) request = RequestFactory().get(url) queryset = [{}, {}] paginated = paginate(request, queryset) assert (paginated.url == request.build_absolute_uri(request.path) + '?q=bookmarks') def test_invalid_page_param(): url = urlparams(reverse('search'), page='a') request = RequestFactory().get(url) queryset = range(100) paginated = paginate(request, queryset) assert (paginated.url == request.build_absolute_uri(request.path) + '?') def test_paginator_filter_num_elements_start(): # Correct number of <li>s on page 1. url = reverse('search') request = RequestFactory().get(url) pager = paginate(request, range(100), per_page=9) html = paginator(pager) doc = pyquery.PyQuery(html) assert 11 == len(doc('li')) def test_paginator_filter_num_elements_middle(): # Correct number of <li>s in the middle. url = urlparams(reverse('search'), page=10) request = RequestFactory().get(url) pager = paginate(request, range(200), per_page=10) html = paginator(pager) doc = pyquery.PyQuery(html) assert 13 == len(doc('li')) def test_paginator_filter_current_selected(): # Ensure the current page has 'class="selected"'. url = urlparams(reverse('search'), page=10) request = RequestFactory().get(url) pager = paginate(request, range(200), per_page=10) html = paginator(pager) doc = pyquery.PyQuery(html) assert (doc('li.selected a').attr('href') == 'http://testserver/en-US/search?page=10')
mpl-2.0
activityworkshop/Murmeli
murmeli/pages/messages.py
1
4728
'''Module for the messages pageset'''

from murmeli.pages.base import PageSet
from murmeli.pagetemplate import PageTemplate
from murmeli import dbutils
from murmeli.contactmgr import ContactManager
from murmeli.messageutils import MessageTree
from murmeli import inbox


class MessagesPageSet(PageSet):
    '''Messages page set, for showing list of messages etc'''

    def __init__(self, system):
        # Register under the "messages" url prefix and load the page template.
        PageSet.__init__(self, system, "messages")
        self.messages_template = PageTemplate('messages')

    def serve_page(self, view, url, params):
        '''Serve a page to the given view.

        Executes any command encoded in url/params first, then renders the
        inbox grouped into contact requests, contact responses and mails.
        '''
        print("Messages serving page", url, "params:", params)
        self.require_resources(['button-compose.png', 'default.css', 'avatar-none.jpg'])
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        dbutils.export_all_avatars(database, self.get_web_cache_dir())
        # Apply send/delete commands before reading the inbox below.
        self._process_command(url, params)
        # Make dictionary to convert ids to names
        contact_names = {cont['torid']:cont['displayName'] for cont in database.get_profiles()}
        unknown_sender = self.i18n("messages.sender.unknown")
        unknown_recpt = self.i18n("messages.recpt.unknown")
        message_list = database.get_inbox() if database else []
        conreqs = []
        conresps = []
        mail_tree = MessageTree()
        for msg in message_list:
            # Skip empty slots and messages flagged as deleted.
            if not msg or msg.get(inbox.FN_DELETED):
                continue
            timestamp = msg.get(inbox.FN_TIMESTAMP)
            msg[inbox.FN_SENT_TIME_STR] = self.make_local_time_string(timestamp)
            msg_type = msg.get(inbox.FN_MSG_TYPE)
            # Lookup sender name for display
            sender_id = msg.get(inbox.FN_FROM_ID)
            if not msg.get(inbox.FN_FROM_NAME):
                msg[inbox.FN_FROM_NAME] = contact_names.get(sender_id, unknown_sender)
            if msg_type in ["contactrequest", "contactrefer"]:
                conreqs.append(msg)
            elif msg_type == "contactresponse":
                msg[inbox.FN_MSG_BODY] = self.fix_conresp_body(msg.get(inbox.FN_MSG_BODY),
                                                               msg.get(inbox.FN_ACCEPTED))
                conresps.append(msg)
            elif msg_type == "normal":
                recpts = msg.get(inbox.FN_RECIPIENTS)
                if recpts:
                    # Build display names and a reply-all id list (recipients
                    # plus the original sender).
                    reply_all = recpts.split(",")
                    recpt_name_list = [contact_names.get(i, unknown_recpt) for
                                       i in reply_all]
                    msg[inbox.FN_RECIPIENT_NAMES] = ", ".join(recpt_name_list)
                    reply_all.append(sender_id)
                    msg[inbox.FN_REPLY_ALL] = ",".join(reply_all)
                # NOTE(review): reconstructed from a whitespace-mangled source;
                # add_msg appears to run for every "normal" message, not only
                # those with recipients — confirm against upstream.
                mail_tree.add_msg(msg)
        mails = mail_tree.build()
        num_msgs = len(conreqs) + len(conresps) + len(mails)
        bodytext = self.messages_template.get_html(self.get_all_i18n(),
                                                   {"contactrequests":conreqs,
                                                    "contactresponses":conresps,
                                                    "mails":mails,
                                                    "nummessages":num_msgs,
                                                    "webcachedir":self.get_web_cache_dir()})
        contents = self.build_page({'pageTitle':self.i18n("messages.title"),
                                    'pageBody':bodytext,
                                    'pageFooter':"<p>Footer</p>"})
        view.set_html(contents)

    def _process_command(self, url, params):
        '''Process a command given by the url and params'''
        database = self.system.get_component(self.system.COMPNAME_DATABASE)
        if url == 'send':
            if params.get('messageType') == "contactresponse":
                if params.get('accept') == "1":
                    # Accepting needs the crypto component to exchange keys.
                    crypto = self.system.get_component(self.system.COMPNAME_CRYPTO)
                    ContactManager(database, crypto).handle_accept(params.get('sendTo'),
                                                                   params.get('messageBody'))
                else:
                    ContactManager(database, None).handle_deny(params.get('sendTo'))
        elif url == 'delete':
            msg_index = self.get_param_as_int(params, 'msgId')
            if msg_index >= 0 and not database.delete_from_inbox(msg_index):
                print("Delete of inbox message '%d' failed" % msg_index)

    def fix_conresp_body(self, msg_body, accepted):
        '''If a contact response message has a blank message body, replace it'''
        if msg_body:
            return msg_body
        suffix = "acceptednomessage" if accepted else "refused"
        return self.i18n("messages.contactrequest." + suffix)
gpl-2.0
Zyell/home-assistant
homeassistant/components/notify/smtp.py
12
3892
""" Mail (SMTP) notification service. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/notify.smtp/ """ import logging import smtplib from email.mime.text import MIMEText from homeassistant.components.notify import ( ATTR_TITLE, DOMAIN, BaseNotificationService) from homeassistant.helpers import validate_config _LOGGER = logging.getLogger(__name__) def get_service(hass, config): """Get the mail notification service.""" if not validate_config({DOMAIN: config}, {DOMAIN: ['recipient']}, _LOGGER): return None smtp_server = config.get('server', 'localhost') port = int(config.get('port', '25')) username = config.get('username', None) password = config.get('password', None) starttls = int(config.get('starttls', 0)) debug = config.get('debug', 0) server = None try: server = smtplib.SMTP(smtp_server, port, timeout=5) server.set_debuglevel(debug) server.ehlo() if starttls == 1: server.starttls() server.ehlo() if username and password: try: server.login(username, password) except (smtplib.SMTPException, smtplib.SMTPSenderRefused): _LOGGER.exception("Please check your settings.") return None except smtplib.socket.gaierror: _LOGGER.exception( "SMTP server not found (%s:%s). " "Please check the IP address or hostname of your SMTP server.", smtp_server, port) return None except smtplib.SMTPAuthenticationError: _LOGGER.exception( "Login not possible. 
" "Please check your setting and/or your credentials.") return None finally: if server: server.quit() return MailNotificationService( smtp_server, port, config['sender'], starttls, username, password, config['recipient'], debug) # pylint: disable=too-few-public-methods, too-many-instance-attributes class MailNotificationService(BaseNotificationService): """Implement the notification service for E-Mail messages.""" # pylint: disable=too-many-arguments def __init__(self, server, port, sender, starttls, username, password, recipient, debug): """Initialize the service.""" self._server = server self._port = port self._sender = sender self.starttls = starttls self.username = username self.password = password self.recipient = recipient self.debug = debug self.tries = 2 def connect(self): """Connect/authenticate to SMTP Server.""" mail = smtplib.SMTP(self._server, self._port, timeout=5) mail.set_debuglevel(self.debug) mail.ehlo_or_helo_if_needed() if self.starttls == 1: mail.starttls() mail.ehlo() if self.username and self.password: mail.login(self.username, self.password) return mail def send_message(self, message="", **kwargs): """Send a message to a user.""" mail = self.connect() subject = kwargs.get(ATTR_TITLE) msg = MIMEText(message) msg['Subject'] = subject msg['To'] = self.recipient msg['From'] = self._sender msg['X-Mailer'] = 'HomeAssistant' for _ in range(self.tries): try: mail.sendmail(self._sender, self.recipient, msg.as_string()) break except smtplib.SMTPException: _LOGGER.warning('SMTPException sending mail: ' 'retrying connection') mail.quit() mail = self.connect() mail.quit()
mit
InstaMineNuggetsCLASSB/InstaMineNuggetsCLASSB
contrib/spendfrom/spendfrom.py
792
10053
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#

from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json

BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    return os.path.expanduser("~/.bitcoin")

def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        # SafeConfigParser needs at least one [section]; fake one up and
        # strip '#' comments while we're at it.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))

def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 19332 if testnet else 9332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)

def unlock_wallet(bitcoind):
    """Prompt for the passphrase if the wallet is locked; return unlock state."""
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")

    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()

def list_available(bitcoind):
    """Return {address: {"total", "outputs", "account"}} for all unspent outputs."""
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }

    return address_summary

def select_coins(needed, inputs):
    """Greedily pick inputs until *needed* is covered; return (outputs, change)."""
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    n = 0
    while have < needed and n < len(inputs):
        outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
        have += inputs[n]["amount"]
        n += 1
    return (outputs, have-needed)

def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction; return its hex encoding."""
    all_coins = list_available(bitcoind)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata

def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all inputs of a decoded transaction."""
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = bitcoind.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result

def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    result = Decimal("0.0")
    for vout in txinfo['vout']:
        result = result + vout['value']
    return result

def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Abort (exit 1) if the transaction's implied fee looks unreasonable."""
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The actual fee is whatever the inputs exceed the outputs by.
        # (Bug fix: this used an undefined name 'fee' below, raising a
        # NameError whenever either no-fee check was reached.)
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(actual_fee))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)

def main():
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just list available funds per address.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
mit
jiangzhuo/kbengine
kbe/res/scripts/common/Lib/idlelib/Bindings.py
61
3049
"""Define the menu contents, hotkeys, and event bindings. There is additional configuration information in the EditorWindow class (and subclasses): the menus are created there based on the menu_specs (class) variable, and menus not created are silently skipped in the code here. This makes it possible, for example, to define a Debug menu which is only present in the PythonShell window, and a Format menu which is only present in the Editor windows. """ from importlib.util import find_spec from idlelib.configHandler import idleConf # Warning: menudefs is altered in macosxSupport.overrideRootMenu() # after it is determined that an OS X Aqua Tk is in use, # which cannot be done until after Tk() is first called. # Do not alter the 'file', 'options', or 'help' cascades here # without altering overrideRootMenu() as well. # TODO: Make this more robust menudefs = [ # underscore prefixes character to underscore ('file', [ ('_New File', '<<open-new-window>>'), ('_Open...', '<<open-window-from-file>>'), ('Open _Module...', '<<open-module>>'), ('Class _Browser', '<<open-class-browser>>'), ('_Path Browser', '<<open-path-browser>>'), None, ('_Save', '<<save-window>>'), ('Save _As...', '<<save-window-as-file>>'), ('Save Cop_y As...', '<<save-copy-of-window-as-file>>'), None, ('Prin_t Window', '<<print-window>>'), None, ('_Close', '<<close-window>>'), ('E_xit', '<<close-all-windows>>'), ]), ('edit', [ ('_Undo', '<<undo>>'), ('_Redo', '<<redo>>'), None, ('Cu_t', '<<cut>>'), ('_Copy', '<<copy>>'), ('_Paste', '<<paste>>'), ('Select _All', '<<select-all>>'), None, ('_Find...', '<<find>>'), ('Find A_gain', '<<find-again>>'), ('Find _Selection', '<<find-selection>>'), ('Find in Files...', '<<find-in-files>>'), ('R_eplace...', '<<replace>>'), ('Go to _Line', '<<goto-line>>'), ]), ('format', [ ('_Indent Region', '<<indent-region>>'), ('_Dedent Region', '<<dedent-region>>'), ('Comment _Out Region', '<<comment-region>>'), ('U_ncomment Region', '<<uncomment-region>>'), ('Tabify Region', 
'<<tabify-region>>'), ('Untabify Region', '<<untabify-region>>'), ('Toggle Tabs', '<<toggle-tabs>>'), ('New Indent Width', '<<change-indentwidth>>'), ]), ('run', [ ('Python Shell', '<<open-python-shell>>'), ]), ('shell', [ ('_View Last Restart', '<<view-restart>>'), ('_Restart Shell', '<<restart-shell>>'), ]), ('debug', [ ('_Go to File/Line', '<<goto-file-line>>'), ('!_Debugger', '<<toggle-debugger>>'), ('_Stack Viewer', '<<open-stack-viewer>>'), ('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'), ]), ('options', [ ('_Configure IDLE...', '<<open-config-dialog>>'), None, ]), ('help', [ ('_About IDLE', '<<about-idle>>'), None, ('_IDLE Help', '<<help>>'), ('Python _Docs', '<<python-docs>>'), ]), ] if find_spec('turtledemo'): menudefs[-1][1].append(('Turtle Demo', '<<open-turtle-demo>>')) default_keydefs = idleConf.GetCurrentKeySet()
lgpl-3.0
apixandru/intellij-community
python/helpers/py3only/docutils/languages/cs.py
52
1894
# $Id: cs.py 4564 2006-05-21 20:44:42Z wiemann $ # Author: Marek Blaha <mb@dat.cz> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Czech-language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' labels = { # fixed: language-dependent 'author': 'Autor', 'authors': 'Auto\u0159i', 'organization': 'Organizace', 'address': 'Adresa', 'contact': 'Kontakt', 'version': 'Verze', 'revision': 'Revize', 'status': 'Stav', 'date': 'Datum', 'copyright': 'Copyright', 'dedication': 'V\u011Bnov\u00E1n\u00ED', 'abstract': 'Abstrakt', 'attention': 'Pozor!', 'caution': 'Opatrn\u011B!', 'danger': '!NEBEZPE\u010C\u00CD!', 'error': 'Chyba', 'hint': 'Rada', 'important': 'D\u016Fle\u017Eit\u00E9', 'note': 'Pozn\u00E1mka', 'tip': 'Tip', 'warning': 'Varov\u00E1n\u00ED', 'contents': 'Obsah'} """Mapping of node class name to label text.""" bibliographic_fields = { # language-dependent: fixed 'autor': 'author', 'auto\u0159i': 'authors', 'organizace': 'organization', 'adresa': 'address', 'kontakt': 'contact', 'verze': 'version', 'revize': 'revision', 'stav': 'status', 'datum': 'date', 'copyright': 'copyright', 'v\u011Bnov\u00E1n\u00ED': 'dedication', 'abstrakt': 'abstract'} """Czech (lowcased) to canonical name mapping for bibliographic fields.""" author_separators = [';', ','] """List of separator strings for the 'Authors' bibliographic field. Tried in order."""
apache-2.0
DJMuggs/ansible-modules-extras
network/a10/a10_server.py
5
10633
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb server objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>

This file is part of Ansible

Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
"""

DOCUMENTATION = '''
---
module: a10_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices
description:
    - Manage slb server objects on A10 Networks devices via aXAPI
author: '"Mischa Peters (@mischapeters)" <mpeters@a10networks.com>'
notes:
    - Requires A10 Networks aXAPI 2.1
options:
  host:
    description:
      - hostname or ip of your A10 Networks device
    required: true
    default: null
    aliases: []
    choices: []
  username:
    description:
      - admin account of your A10 Networks device
    required: true
    default: null
    aliases: ['user', 'admin']
    choices: []
  password:
    description:
      - admin password of your A10 Networks device
    required: true
    default: null
    aliases: ['pass', 'pwd']
    choices: []
  server_name:
    description:
      - slb server name
    required: true
    default: null
    aliases: ['server']
    choices: []
  server_ip:
    description:
      - slb server IP address
    required: false
    default: null
    aliases: ['ip', 'address']
    choices: []
  server_status:
    description:
      - slb virtual server status
    required: false
    default: enable
    aliases: ['status']
    choices: ['enabled', 'disabled']
  server_ports:
    description:
      - A list of ports to create for the server. Each list item should be a
        dictionary which specifies the C(port:) and C(protocol:), but can also
        optionally specify the C(status:). See the examples below for details.
        This parameter is required when C(state) is C(present).
    required: false
    default: null
    aliases: []
    choices: []
  state:
    description:
      - create, update or remove slb server
    required: false
    default: present
    aliases: []
    choices: ['present', 'absent']
'''

EXAMPLES = '''
# Create a new server
- a10_server:
    host: a10.mydomain.com
    username: myadmin
    password: mypassword
    server: test
    server_ip: 1.1.1.100
    server_ports:
      - port_num: 8080
        protocol: tcp
      - port_num: 8443
        protocol: TCP
'''

# The only keys a port definition dictionary may contain.
VALID_PORT_FIELDS = ['port_num', 'protocol', 'status']


def validate_ports(module, ports):
    """Validate and normalize the user-supplied port definitions in place.

    Each entry must contain an integer ``port_num`` and a ``protocol``; the
    protocol name is translated to the aXAPI integer code and ``status``
    defaults to 1 (enabled) when absent.  Any invalid entry aborts the
    module via ``module.fail_json``.
    """
    for item in ports:
        for key in item:
            if key not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))

        # validate the port number is present and an integer
        if 'port_num' in item:
            try:
                item['port_num'] = int(item['port_num'])
            # was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt; only conversion failures belong here
            except (TypeError, ValueError):
                module.fail_json(msg="port_num entries in the port definitions must be integers")
        else:
            module.fail_json(msg="port definitions must define the port_num field")

        # validate the port protocol is present, and convert it to
        # the internal API integer value (and validate it)
        if 'protocol' in item:
            protocol = axapi_get_port_protocol(item['protocol'])
            if not protocol:
                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS))
            else:
                item['protocol'] = protocol
        else:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS))

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1


def main():
    """Entry point: create, update or delete an slb server via aXAPI 2.1."""
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            server_name=dict(type='str', aliases=['server'], required=True),
            server_ip=dict(type='str', aliases=['ip', 'address']),
            server_status=dict(type='str', default='enabled', aliases=['status'],
                               choices=['enabled', 'disabled']),
            server_ports=dict(type='list', aliases=['port'], default=[]),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_server = module.params['server_name']
    slb_server_ip = module.params['server_ip']
    slb_server_status = module.params['server_status']
    slb_server_ports = module.params['server_ports']

    if slb_server is None:
        module.fail_json(msg='server_name is required')

    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # validate the ports data structure
    validate_ports(module, slb_server_ports)

    json_post = {
        'server': {
            'name': slb_server,
        }
    }

    # add optional module parameters
    if slb_server_ip:
        json_post['server']['host'] = slb_server_ip

    if slb_server_ports:
        json_post['server']['port_list'] = slb_server_ports

    if slb_server_status:
        json_post['server']['status'] = axapi_enabled_disabled(slb_server_status)

    slb_server_data = axapi_call(module, session_url + '&method=slb.server.search',
                                 json.dumps({'name': slb_server}))
    slb_server_exists = not axapi_failure(slb_server_data)

    changed = False
    if state == 'present':
        if not slb_server_exists:
            if not slb_server_ip:
                module.fail_json(msg='you must specify an IP address when creating a server')

            result = axapi_call(module, session_url + '&method=slb.server.create',
                                json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def port_needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port_num'] == dst_port['port_num']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False

            def status_needs_update(current_status, new_status):
                '''
                Check to determine if we want to change the status of a server.
                If there is a difference between the current status of the
                server and the desired status, return true, otherwise false.
                '''
                if current_status != new_status:
                    return True
                return False

            defined_ports = slb_server_data.get('server', {}).get('port_list', [])
            current_status = slb_server_data.get('server', {}).get('status')

            # we check for a needed update several ways
            # - in case ports are missing from the ones specified by the user
            # - in case ports are missing from those on the device
            # - in case we are change the status of a server
            if port_needs_update(defined_ports, slb_server_ports) or \
                    port_needs_update(slb_server_ports, defined_ports) or \
                    status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)):
                result = axapi_call(module, session_url + '&method=slb.server.update',
                                    json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg'])
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.server.search',
                                json.dumps({'name': slb_server}))
        else:
            result = slb_server_data
    elif state == 'absent':
        if slb_server_exists:
            result = axapi_call(module, session_url + '&method=slb.server.delete',
                                json.dumps({'name': slb_server}))
            changed = True
        else:
            result = dict(msg="the server was not present")

    # if the config has changed, or we want to force a save, save the config unless otherwise requested
    if changed or write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)

# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *

main()
gpl-3.0
Stavitsky/python-neutronclient
neutronclient/tests/unit/test_cli20_networkprofile.py
6
6186
# Copyright 2013 Cisco Systems Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import sys

from neutronclient.neutron.v2_0 import networkprofile
from neutronclient.tests.unit import test_cli20


class CLITestV20NetworkProfile(test_cli20.CLITestV20Base):
    """CLI tests for the Cisco N1Kv network-profile commands."""

    def test_create_networkprofile(self):
        """Create networkprofile: myid."""
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.CreateNetworkProfile(app, None)
        name, myid, segment_type = 'myname', 'myid', 'vlan'
        cli_args = [name, segment_type]
        pos_names = ['name', 'segment_type']
        pos_values = [name, segment_type]
        self._test_create_resource(resource, cmd, name, myid, cli_args,
                                   pos_names, pos_values)

    def test_list_networkprofile_detail(self):
        """List networkprofile: -D."""
        resources = 'network_profiles'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.ListNetworkProfile(app, None)
        contents = [{'name': 'myname', 'segment_type': 'vlan'}]
        self._test_list_resources(resources, cmd, True,
                                  response_contents=contents)

    def test_list_networkprofile_known_option_after_unknown(self):
        """List networkprofile: -- --tags a b --request-format xml."""
        resources = 'network_profiles'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.ListNetworkProfile(app, None)
        contents = [{'name': 'myname', 'segment_type': 'vlan'}]
        self._test_list_resources(resources, cmd, tags=['a', 'b'],
                                  response_contents=contents)

    def test_list_networkprofile_fields(self):
        """List networkprofile: --fields a --fields b -- --fields c d."""
        resources = 'network_profiles'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.ListNetworkProfile(app, None)
        contents = [{'name': 'myname', 'segment_type': 'vlan'}]
        self._test_list_resources(resources, cmd,
                                  fields_1=['a', 'b'], fields_2=['c', 'd'],
                                  response_contents=contents)

    def test_show_networkprofile(self):
        """Show networkprofile: --fields id --fields name myid."""
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.ShowNetworkProfile(app, None)
        cli_args = ['--fields', 'id', '--fields', 'name', self.test_id]
        self._test_show_resource(resource, cmd, self.test_id, cli_args,
                                 ['id', 'name'])

    def test_delete_networkprofile(self):
        """Delete networkprofile: myid."""
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.DeleteNetworkProfile(app, None)
        myid = 'myid'
        self._test_delete_resource(resource, cmd, myid, [myid])

    def test_create_networkprofile_trunk(self):
        """Create networkprofile: myid."""
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.CreateNetworkProfile(app, None)
        name, myid, segment_type = 'myname', 'myid', 'trunk'
        cli_args = [name, segment_type, '--sub_type', 'vlan']
        pos_names = ['name', 'segment_type', ]
        pos_values = [name, segment_type, ]
        self._test_create_resource(resource, cmd, name, myid, cli_args,
                                   pos_names, pos_values, sub_type='vlan')

    def test_list_networkprofile_trunk_detail(self):
        """List networkprofile: -D."""
        resources = 'network_profiles'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.ListNetworkProfile(app, None)
        contents = [{'name': 'myname', 'segment_type': 'trunk',
                     '--sub_type': 'vlan'}]
        self._test_list_resources(resources, cmd, True,
                                  response_contents=contents)

    def test_create_networkprofile_multi_tenants(self):
        """Create networkprofile with mulitple tenants: myid."""
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.CreateNetworkProfile(app, None)
        name, myid, segment_type = 'myname', 'myid', 'vlan'
        cli_args = [name, segment_type,
                    '--add-tenant', 'demo', '--add-tenant', 'admin']
        pos_names = ['name', 'segment_type', 'add_tenants']
        pos_values = [name, segment_type, ['demo', 'admin']]
        self._test_create_resource(resource, cmd, name, myid, cli_args,
                                   pos_names, pos_values)

    def test_update_networkprofile_multi_tenants(self):
        resource = 'network_profile'
        app = test_cli20.MyApp(sys.stdout)
        cmd = networkprofile.UpdateNetworkProfile(app, None)
        cli_args = ['myid', '--add-tenant', 'service', '--add-tenant', 'demo',
                    '--remove-tenant', 'demo']
        extrafields = {'add_tenants': ['service', 'demo'],
                       'remove_tenants': ['demo']}
        self._test_update_resource(resource, cmd, 'myid', cli_args,
                                   extrafields)
apache-2.0
morpheby/levelup-by
common/djangoapps/track/middleware.py
3
2046
import json
import re

from django.conf import settings

import views


class TrackMiddleware(object):
    """Django middleware that logs a censored summary of every request.

    GET/POST parameters are serialized (passwords redacted, payload
    truncated) and handed to ``views.server_track``.
    """

    def process_request(self, request):
        try:
            if not self._should_process_request(request):
                return

            # Removes passwords from the tracking logs
            # WARNING: This list needs to be changed whenever we change
            # password handling functionality.
            #
            # As of the time of this comment, only 'password' is used
            # The rest are there for future extension.
            #
            # Passwords should never be sent as GET requests, but
            # this can happen due to older browser bugs. We censor
            # this too.
            #
            # We should manually confirm no passwords make it into log
            # files when we change this.
            censored_strings = ['password', 'newpassword', 'new_password',
                                'oldpassword', 'old_password']
            post_dict = dict(request.POST)
            get_dict = dict(request.GET)
            for string in censored_strings:
                if string in post_dict:
                    post_dict[string] = '*' * 8
                if string in get_dict:
                    get_dict[string] = '*' * 8

            event = {'GET': dict(get_dict), 'POST': dict(post_dict)}

            # TODO: Confirm no large file uploads
            event = json.dumps(event)
            event = event[:512]

            views.server_track(request, request.META['PATH_INFO'], event)
        except Exception:
            # Tracking must never break request handling, so failures are
            # deliberately swallowed.  This used to be a bare ``except:``,
            # which also caught SystemExit and KeyboardInterrupt; narrowing
            # to Exception lets those propagate as they should.
            pass

    def _should_process_request(self, request):
        """Return False when the request path matches an ignore pattern."""
        path = request.META['PATH_INFO']

        ignored_url_patterns = getattr(settings, 'TRACKING_IGNORE_URL_PATTERNS', [])
        for pattern in ignored_url_patterns:
            # Note we are explicitly relying on python's internal caching of
            # compiled regular expressions here.
            if re.match(pattern, path):
                return False
        return True
agpl-3.0
GoSteven/Diary
django/test/utils.py
13
3771
import sys
import time
import os
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail.backends import locmem
from django.test import signals
from django.template import Template
from django.utils.translation import deactivate

__all__ = ('Approximate', 'ContextList', 'setup_test_environment',
           'teardown_test_environment', 'get_runner')


class Approximate(object):
    """Wrapper comparing equal to values within ``places`` decimal places."""

    def __init__(self, val, places=7):
        self.val = val
        self.places = places

    def __repr__(self):
        return repr(self.val)

    def __eq__(self, other):
        if self.val == other:
            return True
        return round(abs(self.val-other), self.places) == 0


class ContextList(list):
    """A wrapper that provides direct key access to context items contained
    in a list of context objects.
    """
    def __getitem__(self, key):
        if isinstance(key, basestring):
            # String key: search each subcontext in order for the first match.
            for subcontext in self:
                if key in subcontext:
                    return subcontext[key]
            raise KeyError(key)
        else:
            return super(ContextList, self).__getitem__(key)

    def __contains__(self, key):
        # EAFP: reuse __getitem__ rather than duplicating the lookup logic.
        # (The result itself is not needed; the previous version bound it to
        # an unused local.)
        try:
            self[key]
        except KeyError:
            return False
        return True


def instrumented_test_render(self, context):
    """
    An instrumented Template render method, providing a signal
    that can be intercepted by the test system Client
    """
    signals.template_rendered.send(sender=self, template=self, context=context)
    return self.nodelist.render(context)


def setup_test_environment():
    """Perform any global pre-test setup. This involves:

        - Installing the instrumented test renderer
        - Set the email backend to the locmem email backend.
        - Setting the active locale to match the LANGUAGE_CODE setting.
    """
    Template.original_render = Template._render
    Template._render = instrumented_test_render

    mail.original_SMTPConnection = mail.SMTPConnection
    mail.SMTPConnection = locmem.EmailBackend

    mail.original_email_backend = settings.EMAIL_BACKEND
    settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'

    mail.outbox = []

    deactivate()


def teardown_test_environment():
    """Perform any global post-test teardown. This involves:

        - Restoring the original test renderer
        - Restoring the email sending functions
    """
    Template._render = Template.original_render
    del Template.original_render

    mail.SMTPConnection = mail.original_SMTPConnection
    del mail.original_SMTPConnection

    mail.EMAIL_BACKEND = mail.original_email_backend
    settings.EMAIL_BACKEND = mail.original_email_backend
    del mail.original_email_backend

    del mail.outbox


def get_warnings_state():
    """
    Returns an object containing the state of the warnings module
    """
    # There is no public interface for doing this, but this implementation of
    # get_warnings_state and restore_warnings_state appears to work on Python
    # 2.4 to 2.7.
    return warnings.filters[:]


def restore_warnings_state(state):
    """
    Restores the state of the warnings module when passed an object that was
    returned by get_warnings_state()
    """
    warnings.filters = state[:]


def get_runner(settings):
    """Import and return the test-runner class named by settings.TEST_RUNNER."""
    test_path = settings.TEST_RUNNER.split('.')
    # Allow for Python 2.5 relative paths
    if len(test_path) > 1:
        test_module_name = '.'.join(test_path[:-1])
    else:
        test_module_name = '.'
    test_module = __import__(test_module_name, {}, {}, test_path[-1])
    test_runner = getattr(test_module, test_path[-1])
    return test_runner
bsd-3-clause