repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
brianwoo/django-tutorial | refs/heads/master | build/Django/django/contrib/contenttypes/checks.py | 456 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.utils import six
def check_generic_foreign_keys(**kwargs):
    """Run the check framework over every GenericForeignKey field.

    Scans the attributes of every installed model for GenericForeignKey
    descriptors and collects the errors reported by each field's own
    ``check()`` method.
    """
    # Imported lazily to avoid a circular import with the fields module.
    from .fields import GenericForeignKey

    errors = []
    for model in apps.get_models():
        for attr in six.itervalues(vars(model)):
            if isinstance(attr, GenericForeignKey):
                errors.extend(attr.check())
    return errors
|
Thraxis/SickRage | refs/heads/master | lib/imdb/parser/http/searchKeywordParser.py | 128 | """
parser.http.searchKeywordParser module (imdb package).
This module provides the HTMLSearchKeywordParser class (and the
search_company_parser instance), used to parse the results of a search
for a given keyword.
E.g., when searching for the keyword "alabama", the parsed page would be:
http://akas.imdb.com/find?s=kw;mx=20;q=alabama
Copyright 2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from utils import Extractor, Attribute, analyze_imdbid
from imdb.utils import analyze_title, analyze_company_name
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicKeywordParser(DOMBasicMovieParser):
    """Simply get the name of a keyword.

    It's used by the DOMHTMLSearchKeywordParser class to return a result
    for a direct match (when a search on IMDb results in a single
    keyword, the web server sends directly the keyword page.
    """
    # XXX: it's still to be tested!
    # I'm not even sure there can be a direct hit, searching for keywords.
    def _titleFunct(self, x):
        # Fall back to an empty unicode string when no text was extracted.
        return analyze_company_name(x or u'')
class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, searching for keywords similar to
    the one given."""
    _BaseParser = DOMBasicKeywordParser
    # Title prefix that marks a page which is NOT a direct keyword hit.
    _notDirectHitTitle = '<title>imdb keyword'
    # Keyword results need no title post-processing; return the text as-is.
    _titleBuilder = lambda self, x: x
    _linkPrefix = '/keyword/'

    # Extract the link text of each result row as the 'data' value.
    _attrs = [Attribute(key='data',
                        multi=True,
                        path="./a[1]/text()"
                        )]
    # Match every table cell whose first link points at a /keyword/ page.
    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, " \
                                    "'/keyword/')]/..",
                            attrs=_attrs)]
def custom_analyze_title4kwd(title, yearNote, outline):
    """Return a dictionary with the needed info.

    Builds a movie-info dict from a title string, an optional year note
    (only its first whitespace-separated chunk is used, re-closed with a
    parenthesis) and an optional plot outline.
    """
    title = title.strip()
    if not title:
        return {}
    if yearNote:
        year_chunk = yearNote.split(' ')[0]
        title = '%s %s)' % (title, year_chunk)
    info = analyze_title(title)
    if outline:
        info['plot outline'] = outline
    return info
class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, searching for movies with the given
    keyword."""
    # Title prefix that marks a page which is NOT a direct hit.
    _notDirectHitTitle = '<title>best'

    # For each result row collect the title link, its text, the year note
    # and the plot outline, then normalize them into (imdbID, info-dict).
    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'info': "./a[1]//text()",
                            'ynote': "./span[@class='desc']/text()",
                            'outline': "./span[@class='outline']//text()"
                            },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            custom_analyze_title4kwd(x.get('info') or u'',
                                                    x.get('ynote') or u'',
                                                    x.get('outline') or u'')
                        ))]
    # Match every table cell whose first link points at a /title/tt page.
    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, " \
                                    "'/title/tt')]/..",
                            attrs=_attrs)]
# Parsers exported to the imdb.parser.http package: maps each parser name
# to a tuple of ((parser classes,), init keyword arguments or None).
_OBJECTS = {
    'search_keyword_parser': ((DOMHTMLSearchKeywordParser,),
                {'kind': 'keyword', '_basic_parser': DOMBasicKeywordParser}),
    'search_moviekeyword_parser': ((DOMHTMLSearchMovieKeywordParser,), None)
}
|
tailorian/Sick-Beard | refs/heads/ThePirateBay | SickBeard.py | 6 | #!/usr/bin/env python2
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
# Check needed software dependencies to nudge users to fix their setup
import sys

# This script is Python 2 only (print statements, except-comma syntax below).
if sys.version_info < (2, 6):
    print "Sorry, requires Python 2.6 or 2.7."
    sys.exit(1)

# Cheetah is the templating engine for the web UI; require the 2.x series.
try:
    import Cheetah
    if Cheetah.Version[0] != '2':
        raise ValueError
except ValueError:
    print "Sorry, requires Python module Cheetah 2.1.0 or newer."
    sys.exit(1)
except:
    print "The Python module Cheetah is required"
    sys.exit(1)

import os
# Put the bundled third-party libraries ahead of site-packages.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), 'lib')))

# We only need this for compiling an EXE and I will just always do that on 2.6+
if sys.hexversion >= 0x020600F0:
    from multiprocessing import freeze_support  # @UnresolvedImport

import locale
import threading
import time
import signal
import traceback
import getopt

import sickbeard

from sickbeard import db
from sickbeard.tv import TVShow
from sickbeard import logger
from sickbeard.version import SICKBEARD_VERSION
from sickbeard.databases.mainDB import MIN_DB_VERSION
from sickbeard.databases.mainDB import MAX_DB_VERSION

from sickbeard.webserveInit import initWebServer

from lib.configobj import ConfigObj

# Install shutdown handlers so Ctrl-C / kill terminate cleanly.
signal.signal(signal.SIGINT, sickbeard.sig_handler)
signal.signal(signal.SIGTERM, sickbeard.sig_handler)
def loadShowsFromDB():
    """
    Populates the showList with shows from the database
    """
    myDB = db.DBConnection()
    sqlResults = myDB.select("SELECT * FROM tv_shows")

    for sqlShow in sqlResults:
        try:
            curShow = TVShow(int(sqlShow["tvdb_id"]))
            sickbeard.showList.append(curShow)
        except Exception, e:
            # A broken show record must not prevent the rest from loading.
            logger.log(u"There was an error creating the show in " + sqlShow["location"] + ": " + str(e).decode('utf-8'), logger.ERROR)
            logger.log(traceback.format_exc(), logger.DEBUG)

    # TODO: update the existing shows if the showlist has something in it
def daemonize():
    """
    Fork off as a daemon

    Standard UNIX double-fork: the first fork plus setsid() detaches from
    the controlling terminal; the second fork guarantees the daemon can
    never reacquire one.  Optionally writes a PID file afterwards.
    """
    # pylint: disable=E1101
    # Make a non-session-leader child process
    try:
        pid = os.fork() # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("1st fork failed: %s [%d]" % (e.strerror, e.errno))

    os.setsid() # @UndefinedVariable - only available in UNIX

    # Make sure I can read my own files and shut out others
    prev = os.umask(0)
    os.umask(prev and int('077', 8))

    # Make the child a session-leader by detaching from the terminal
    try:
        pid = os.fork() # @UndefinedVariable - only available in UNIX
        if pid != 0:
            sys.exit(0)
    except OSError, e:
        raise RuntimeError("2nd fork failed: %s [%d]" % (e.strerror, e.errno))

    # Detach stdin from the terminal.
    dev_null = file('/dev/null', 'r')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())

    if sickbeard.CREATEPID:
        pid = str(os.getpid())
        logger.log(u"Writing PID " + pid + " to " + str(sickbeard.PIDFILE))
        file(sickbeard.PIDFILE, 'w').write("%s\n" % pid)
def main():
    """
    TV for me

    Entry point: parses command-line options, prepares data/config paths,
    validates the database version, starts the web server and worker
    threads, then loops forever servicing queued commands.
    """

    # do some preliminary stuff
    sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
    sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
    sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
    sickbeard.DATA_DIR = sickbeard.PROG_DIR
    sickbeard.MY_ARGS = sys.argv[1:]
    sickbeard.CREATEPID = False
    sickbeard.DAEMON = False

    sickbeard.SYS_ENCODING = None

    try:
        locale.setlocale(locale.LC_ALL, "")
        sickbeard.SYS_ENCODING = locale.getpreferredencoding()
    except (locale.Error, IOError):
        pass

    # For OSes that are poorly configured I'll just randomly force UTF-8
    if not sickbeard.SYS_ENCODING or sickbeard.SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        sickbeard.SYS_ENCODING = 'UTF-8'

    # reload() restores setdefaultencoding, which site.py deletes at startup.
    if not hasattr(sys, "setdefaultencoding"):
        reload(sys)

    try:
        # pylint: disable=E1101
        # On non-unicode builds this will raise an AttributeError, if encoding type is not valid it throws a LookupError
        sys.setdefaultencoding(sickbeard.SYS_ENCODING)
    except:
        print 'Sorry, you MUST add the Sick Beard folder to the PYTHONPATH environment variable'
        print 'or find another way to force Python to use ' + sickbeard.SYS_ENCODING + ' for string encoding.'
        sys.exit(1)

    # Need console logging for SickBeard.py and SickBeard-console.exe
    consoleLogging = (not hasattr(sys, "frozen")) or (sickbeard.MY_NAME.lower().find('-console') > 0)

    # Rename the main thread
    threading.currentThread().name = "MAIN"

    try:
        opts, args = getopt.getopt(sys.argv[1:], "qfdp::", ['quiet', 'forceupdate', 'daemon', 'port=', 'pidfile=', 'nolaunch', 'config=', 'datadir=']) # @UnusedVariable
    except getopt.GetoptError:
        print "Available Options: --quiet, --forceupdate, --port, --daemon, --pidfile, --config, --datadir"
        sys.exit()

    forceUpdate = False
    forcedPort = None
    noLaunch = False

    for o, a in opts:
        # For now we'll just silence the logging
        if o in ('-q', '--quiet'):
            consoleLogging = False

        # Should we update (from tvdb) all shows in the DB right away?
        if o in ('-f', '--forceupdate'):
            forceUpdate = True

        # Suppress launching web browser
        # Needed for OSes without default browser assigned
        # Prevent duplicate browser window when restarting in the app
        if o in ('--nolaunch',):
            noLaunch = True

        # Override default/configured port
        if o in ('-p', '--port'):
            forcedPort = int(a)

        # Run as a daemon
        if o in ('-d', '--daemon'):
            if sys.platform == 'win32':
                print "Daemonize not supported under Windows, starting normally"
            else:
                consoleLogging = False
                sickbeard.DAEMON = True

        # Specify folder to load the config file from
        if o in ('--config',):
            sickbeard.CONFIG_FILE = os.path.abspath(a)

        # Specify folder to use as the data dir
        if o in ('--datadir',):
            sickbeard.DATA_DIR = os.path.abspath(a)

        # Write a pidfile if requested
        if o in ('--pidfile',):
            sickbeard.PIDFILE = str(a)

            # If the pidfile already exists, sickbeard may still be running, so exit
            if os.path.exists(sickbeard.PIDFILE):
                sys.exit("PID file '" + sickbeard.PIDFILE + "' already exists. Exiting.")

            # The pidfile is only useful in daemon mode, make sure we can write the file properly
            if sickbeard.DAEMON:
                sickbeard.CREATEPID = True
                try:
                    file(sickbeard.PIDFILE, 'w').write("pid\n")
                except IOError, e:
                    raise SystemExit("Unable to write PID file: %s [%d]" % (e.strerror, e.errno))
            else:
                logger.log(u"Not running in daemon mode. PID file creation disabled.")

    # If they don't specify a config file then put it in the data dir
    if not sickbeard.CONFIG_FILE:
        sickbeard.CONFIG_FILE = os.path.join(sickbeard.DATA_DIR, "config.ini")

    # Make sure that we can create the data dir
    if not os.access(sickbeard.DATA_DIR, os.F_OK):
        try:
            os.makedirs(sickbeard.DATA_DIR, 0744)
        except os.error, e:
            raise SystemExit("Unable to create datadir '" + sickbeard.DATA_DIR + "'")

    # Make sure we can write to the data dir
    if not os.access(sickbeard.DATA_DIR, os.W_OK):
        raise SystemExit("Datadir must be writeable '" + sickbeard.DATA_DIR + "'")

    # Make sure we can write to the config file
    if not os.access(sickbeard.CONFIG_FILE, os.W_OK):
        if os.path.isfile(sickbeard.CONFIG_FILE):
            raise SystemExit("Config file '" + sickbeard.CONFIG_FILE + "' must be writeable.")
        elif not os.access(os.path.dirname(sickbeard.CONFIG_FILE), os.W_OK):
            raise SystemExit("Config file root dir '" + os.path.dirname(sickbeard.CONFIG_FILE) + "' must be writeable.")

    os.chdir(sickbeard.DATA_DIR)

    if consoleLogging:
        print "Starting up Sick Beard " + SICKBEARD_VERSION + " from " + sickbeard.CONFIG_FILE

    # Load the config and publish it to the sickbeard package
    if not os.path.isfile(sickbeard.CONFIG_FILE):
        logger.log(u"Unable to find '" + sickbeard.CONFIG_FILE + "' , all settings will be default!", logger.ERROR)

    sickbeard.CFG = ConfigObj(sickbeard.CONFIG_FILE)

    # Refuse to start on a database that is too old or too new to handle.
    CUR_DB_VERSION = db.DBConnection().checkDBVersion()
    if CUR_DB_VERSION > 0:
        if CUR_DB_VERSION < MIN_DB_VERSION:
            raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") is too old to migrate from with this version of Sick Beard (" + str(MIN_DB_VERSION) + ").\n" + \
                "Upgrade using a previous version of SB first, or start with no database file to begin fresh.")
        if CUR_DB_VERSION > MAX_DB_VERSION:
            raise SystemExit("Your database version (" + str(db.DBConnection().checkDBVersion()) + ") has been incremented past what this version of Sick Beard supports (" + str(MAX_DB_VERSION) + ").\n" + \
                "If you have used other forks of SB, your database may be unusable due to their modifications.")

    # Initialize the config and our threads
    sickbeard.initialize(consoleLogging=consoleLogging)

    sickbeard.showList = []

    if sickbeard.DAEMON:
        daemonize()

    # Use this PID for everything
    sickbeard.PID = os.getpid()

    if forcedPort:
        logger.log(u"Forcing web server to port " + str(forcedPort))
        startPort = forcedPort
    else:
        startPort = sickbeard.WEB_PORT

    if sickbeard.WEB_LOG:
        log_dir = sickbeard.LOG_DIR
    else:
        log_dir = None

    # sickbeard.WEB_HOST is available as a configuration value in various
    # places but is not configurable. It is supported here for historic reasons.
    if sickbeard.WEB_HOST and sickbeard.WEB_HOST != '0.0.0.0':
        webhost = sickbeard.WEB_HOST
    else:
        if sickbeard.WEB_IPV6:
            webhost = '::'
        else:
            webhost = '0.0.0.0'

    try:
        initWebServer({
            'port': startPort,
            'host': webhost,
            'data_root': os.path.join(sickbeard.PROG_DIR, 'gui/'+sickbeard.GUI_NAME),
            'web_root': sickbeard.WEB_ROOT,
            'log_dir': log_dir,
            'username': sickbeard.WEB_USERNAME,
            'password': sickbeard.WEB_PASSWORD,
            'enable_https': sickbeard.ENABLE_HTTPS,
            'https_cert': sickbeard.HTTPS_CERT,
            'https_key': sickbeard.HTTPS_KEY,
        })
    except IOError:
        logger.log(u"Unable to start web server, is something else running on port %d?" % startPort, logger.ERROR)
        if sickbeard.LAUNCH_BROWSER and not sickbeard.DAEMON:
            logger.log(u"Launching browser and exiting", logger.ERROR)
            sickbeard.launchBrowser(startPort)
        sys.exit()

    # Build from the DB to start with
    logger.log(u"Loading initial show list")
    loadShowsFromDB()

    # Fire up all our threads
    sickbeard.start()

    # Launch browser if we're supposed to
    if sickbeard.LAUNCH_BROWSER and not noLaunch and not sickbeard.DAEMON:
        sickbeard.launchBrowser(startPort)

    # Start an update if we're supposed to
    if forceUpdate or sickbeard.UPDATE_SHOWS_ON_START:
        sickbeard.showUpdateScheduler.action.run(force=True) # @UndefinedVariable

    # Stay alive while my threads do the work
    while (True):

        if sickbeard.invoked_command:
            sickbeard.invoked_command()
            sickbeard.invoked_command = None

        time.sleep(1)

    return
if __name__ == "__main__":
    # Needed for frozen (py2exe-style) builds that use multiprocessing.
    if sys.hexversion >= 0x020600F0:
        freeze_support()
    main()
|
gvalentine/pyqtgraph | refs/heads/develop | pyqtgraph/SRTTransform.py | 34 | # -*- coding: utf-8 -*-
from .Qt import QtCore, QtGui
from .Point import Point
import numpy as np
class SRTTransform(QtGui.QTransform):
    """Transform that can always be represented as a combination of 3 matrices: scale * rotate * translate
    This transform has no shear; angles are always preserved.

    Internal state is kept in ``self._state`` as ``pos`` (Point),
    ``scale`` (Point) and ``angle`` (degrees); ``update()`` rebuilds the
    underlying QTransform from that state after every mutation.
    """
    def __init__(self, init=None):
        """Build from nothing (identity), a saved-state dict, another
        SRTTransform, a QTransform, or a QMatrix4x4."""
        QtGui.QTransform.__init__(self)
        self.reset()
        if init is None:
            return
        elif isinstance(init, dict):
            self.restoreState(init)
        elif isinstance(init, SRTTransform):
            self._state = {
                'pos': Point(init._state['pos']),
                'scale': Point(init._state['scale']),
                'angle': init._state['angle']
            }
            self.update()
        elif isinstance(init, QtGui.QTransform):
            self.setFromQTransform(init)
        elif isinstance(init, QtGui.QMatrix4x4):
            self.setFromMatrix4x4(init)
        else:
            raise Exception("Cannot create SRTTransform from input type: %s" % str(type(init)))

    def getScale(self):
        """Return the (x, y) scale as a Point."""
        return self._state['scale']

    def getAngle(self):
        ## deprecated; for backward compatibility
        return self.getRotation()

    def getRotation(self):
        """Return the rotation angle in degrees."""
        return self._state['angle']

    def getTranslation(self):
        """Return the translation as a Point."""
        return self._state['pos']

    def reset(self):
        """Reset to the identity transform."""
        self._state = {
            'pos': Point(0,0),
            'scale': Point(1,1),
            'angle': 0.0  ## in degrees
        }
        self.update()

    def setFromQTransform(self, tr):
        """Decompose a 2D QTransform into pos/scale/angle state.

        Maps three reference points through ``tr`` to recover the
        transformed basis vectors.
        """
        p1 = Point(tr.map(0., 0.))
        p2 = Point(tr.map(1., 0.))
        p3 = Point(tr.map(0., 1.))

        dp2 = Point(p2-p1)
        dp3 = Point(p3-p1)

        ## detect flipped axes
        if dp2.angle(dp3) > 0:
            #da = 180
            da = 0
            sy = -1.0
        else:
            da = 0
            sy = 1.0

        self._state = {
            'pos': Point(p1),
            'scale': Point(dp2.length(), dp3.length() * sy),
            'angle': (np.arctan2(dp2[1], dp2[0]) * 180. / np.pi) + da
        }
        self.update()

    def setFromMatrix4x4(self, m):
        """Set state from a QMatrix4x4; only Z-axis rotations are representable."""
        # NOTE: SRTTransform3D is imported at the bottom of this module.
        m = SRTTransform3D(m)
        angle, axis = m.getRotation()
        if angle != 0 and (axis[0] != 0 or axis[1] != 0 or axis[2] != 1):
            print("angle: %s axis: %s" % (str(angle), str(axis)))
            raise Exception("Can only convert 4x4 matrix to 3x3 if rotation is around Z-axis.")
        self._state = {
            'pos': Point(m.getTranslation()),
            'scale': Point(m.getScale()),
            'angle': angle
        }
        self.update()

    def translate(self, *args):
        """Acceptable arguments are:
        x, y
        [x, y]
        Point(x,y)"""
        t = Point(*args)
        self.setTranslate(self._state['pos']+t)

    def setTranslate(self, *args):
        """Acceptable arguments are:
        x, y
        [x, y]
        Point(x,y)"""
        self._state['pos'] = Point(*args)
        self.update()

    def scale(self, *args):
        """Acceptable arguments are:
        x, y
        [x, y]
        Point(x,y)"""
        s = Point(*args)
        self.setScale(self._state['scale'] * s)

    def setScale(self, *args):
        """Acceptable arguments are:
        x, y
        [x, y]
        Point(x,y)"""
        self._state['scale'] = Point(*args)
        self.update()

    def rotate(self, angle):
        """Rotate the transformation by angle (in degrees)"""
        self.setRotate(self._state['angle'] + angle)

    def setRotate(self, angle):
        """Set the transformation rotation to angle (in degrees)"""
        self._state['angle'] = angle
        self.update()

    def __truediv__(self, t):
        """A / B == B^-1 * A"""
        dt = t.inverted()[0] * self
        return SRTTransform(dt)

    def __div__(self, t):
        # Python 2 division operator; delegates to __truediv__.
        return self.__truediv__(t)

    def __mul__(self, t):
        return SRTTransform(QtGui.QTransform.__mul__(self, t))

    def saveState(self):
        """Return a plain-dict snapshot usable with restoreState()."""
        p = self._state['pos']
        s = self._state['scale']
        #if s[0] == 0:
            #raise Exception('Invalid scale: %s' % str(s))
        return {'pos': (p[0], p[1]), 'scale': (s[0], s[1]), 'angle': self._state['angle']}

    def restoreState(self, state):
        """Load state from a dict; missing keys fall back to identity values."""
        self._state['pos'] = Point(state.get('pos', (0,0)))
        self._state['scale'] = Point(state.get('scale', (1.,1.)))
        self._state['angle'] = state.get('angle', 0)
        self.update()

    def update(self):
        """Rebuild the underlying QTransform from self._state."""
        QtGui.QTransform.reset(self)
        ## modifications to the transform are multiplied on the right, so we need to reverse order here.
        QtGui.QTransform.translate(self, *self._state['pos'])
        QtGui.QTransform.rotate(self, self._state['angle'])
        QtGui.QTransform.scale(self, *self._state['scale'])

    def __repr__(self):
        return str(self.saveState())

    def matrix(self):
        """Return the 3x3 transform as a numpy array."""
        return np.array([[self.m11(), self.m12(), self.m13()],[self.m21(), self.m22(), self.m23()],[self.m31(), self.m32(), self.m33()]])
# Interactive demo / manual test: shows an ROI whose transform is mirrored
# onto a crosshair item; run the module directly to exercise SRTTransform.
if __name__ == '__main__':
    from . import widgets
    import GraphicsView
    from .functions import *
    app = QtGui.QApplication([])
    win = QtGui.QMainWindow()
    win.show()
    cw = GraphicsView.GraphicsView()
    #cw.enableMouse()
    win.setCentralWidget(cw)
    s = QtGui.QGraphicsScene()
    cw.setScene(s)
    win.resize(600,600)
    cw.enableMouse()
    cw.setRange(QtCore.QRectF(-100., -100., 200., 200.))

    class Item(QtGui.QGraphicsItem):
        # Composite marker: rectangle, 'R' label and a crosshair.
        def __init__(self):
            QtGui.QGraphicsItem.__init__(self)
            self.b = QtGui.QGraphicsRectItem(20, 20, 20, 20, self)
            self.b.setPen(QtGui.QPen(mkPen('y')))
            self.t1 = QtGui.QGraphicsTextItem(self)
            self.t1.setHtml('<span style="color: #F00">R</span>')
            self.t1.translate(20, 20)
            self.l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0, self)
            self.l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10, self)
            self.l1.setPen(QtGui.QPen(mkPen('y')))
            self.l2.setPen(QtGui.QPen(mkPen('y')))
        def boundingRect(self):
            # Children paint themselves; this item has no geometry of its own.
            return QtCore.QRectF()
        def paint(self, *args):
            pass

    #s.addItem(b)
    #s.addItem(t1)
    item = Item()
    s.addItem(item)
    l1 = QtGui.QGraphicsLineItem(10, 0, -10, 0)
    l2 = QtGui.QGraphicsLineItem(0, 10, 0, -10)
    l1.setPen(QtGui.QPen(mkPen('r')))
    l2.setPen(QtGui.QPen(mkPen('r')))
    s.addItem(l1)
    s.addItem(l2)

    tr1 = SRTTransform()
    tr2 = SRTTransform()
    tr3 = QtGui.QTransform()
    tr3.translate(20, 0)
    tr3.rotate(45)
    print("QTransform -> Transform:", SRTTransform(tr3))

    print("tr1:", tr1)

    tr2.translate(20, 0)
    tr2.rotate(45)
    print("tr2:", tr2)

    dt = tr2/tr1
    print("tr2 / tr1 = ", dt)

    print("tr2 * tr1 = ", tr2*tr1)

    tr4 = SRTTransform()
    tr4.scale(-1, 1)
    tr4.rotate(30)
    print("tr1 * tr4 = ", tr1*tr4)

    w1 = widgets.TestROI((19,19), (22, 22), invertible=True)
    #w2 = widgets.TestROI((0,0), (150, 150))
    w1.setZValue(10)
    s.addItem(w1)
    #s.addItem(w2)
    w1Base = w1.getState()
    #w2Base = w2.getState()
    def update():
        # Mirror the ROI's global transform onto the marker item.
        tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        item.setTransform(tr1)

    #def update2():
        #tr1 = w1.getGlobalTransform(w1Base)
        #tr2 = w2.getGlobalTransform(w2Base)
        #t1.setTransform(tr1)
        #w1.setState(w1Base)
        #w1.applyGlobalTransform(tr2)

    w1.sigRegionChanged.connect(update)
    #w2.sigRegionChanged.connect(update2)

from .SRTTransform3D import SRTTransform3D
|
Charlotte-Morgan/inasafe | refs/heads/develop | safe_extras/raven/processors.py | 11 | """
raven.core.processors
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
import warnings
from raven.utils.compat import string_types, text_type, PY3
from raven.utils import varmap
class Processor(object):
    """Base class for event processors.

    Subclasses override ``get_data`` and/or the ``filter_*`` hooks to
    scrub or transform an event payload before it is reported.
    """

    def __init__(self, client):
        self.client = client

    def get_data(self, data, **kwargs):
        # Hook: return a replacement payload, or a falsy value to keep `data`.
        return

    def process(self, data, **kwargs):
        """Apply every filter hook to ``data`` and return the result."""
        replacement = self.get_data(data, **kwargs)
        if replacement:
            data = replacement

        # Scrub each captured exception's stacktrace, when present.
        if 'exception' in data:
            if 'values' in data['exception']:
                for value in data['exception'].get('values', []):
                    if 'stacktrace' in value:
                        self.filter_stacktrace(value['stacktrace'])

        # Scrub the HTTP request section of the event.
        if 'request' in data:
            self.filter_http(data['request'])

        # Scrub the extra context section.
        if 'extra' in data:
            data['extra'] = self.filter_extra(data['extra'])

        return data

    def filter_stacktrace(self, data):
        # Default: stacktraces pass through untouched.
        pass

    def filter_http(self, data):
        # Default: request data passes through untouched.
        pass

    def filter_extra(self, data):
        # Default: extra data passes through unchanged.
        return data
class RemovePostDataProcessor(Processor):
    """Removes HTTP post data."""

    def filter_http(self, data, **kwargs):
        # Drop the request body entirely; a missing key is fine.
        if 'data' in data:
            del data['data']
class RemoveStackLocalsProcessor(Processor):
    """Removes local context variables from stacktraces."""

    def filter_stacktrace(self, data, **kwargs):
        # Strip the captured local variables from every frame.
        frames = data.get('frames', [])
        for frame in frames:
            if 'vars' in frame:
                del frame['vars']
class SanitizeKeysProcessor(Processor):
    """
    Asterisk out things that correspond to a configurable set of keys.
    """

    # Fixed-length replacement so the original value length is not leaked.
    MASK = '*' * 8

    @property
    def sanitize_keys(self):
        """The configured set of sensitive key substrings (from the client)."""
        keys = getattr(self.client, 'sanitize_keys')
        if keys is None:
            raise ValueError('The sanitize_keys setting must be present to use SanitizeKeysProcessor')
        return keys

    def sanitize(self, item, value):
        """Return ``value`` masked if the key ``item`` matches a sensitive key."""
        if value is None:
            return

        if not item:  # key can be a NoneType
            return value

        # Just in case we have bytes here, we want to make them into text
        # properly without failing so we can perform our check.
        if isinstance(item, bytes):
            item = item.decode('utf-8', 'replace')
        else:
            item = text_type(item)

        # Substring match on the lowercased key.
        item = item.lower()
        for key in self.sanitize_keys:
            if key in item:
                # store mask as a fixed length for security
                return self.MASK
        return value

    def filter_stacktrace(self, data):
        # Mask sensitive local variables in every frame.
        for frame in data.get('frames', []):
            if 'vars' not in frame:
                continue
            frame['vars'] = varmap(self.sanitize, frame['vars'])

    def filter_http(self, data):
        for n in ('data', 'cookies', 'headers', 'env', 'query_string'):
            if n not in data:
                continue

            # data could be provided as bytes
            if PY3 and isinstance(data[n], bytes):
                data[n] = data[n].decode('utf-8', 'replace')

            if isinstance(data[n], string_types) and '=' in data[n]:
                # at this point we've assumed it's a standard HTTP query
                # or cookie
                if n == 'cookies':
                    delimiter = ';'
                else:
                    delimiter = '&'

                data[n] = self._sanitize_keyvals(data[n], delimiter)
            else:
                data[n] = varmap(self.sanitize, data[n])
                # The Cookie header itself is a key=value string; re-scan it.
                if n == 'headers' and 'Cookie' in data[n]:
                    data[n]['Cookie'] = self._sanitize_keyvals(
                        data[n]['Cookie'], ';'
                    )

    def filter_extra(self, data):
        return varmap(self.sanitize, data)

    def _sanitize_keyvals(self, keyvals, delimiter):
        """Mask values in a ``k=v<delimiter>k=v`` style string, preserving layout."""
        sanitized_keyvals = []
        for keyval in keyvals.split(delimiter):
            keyval = keyval.split('=')
            if len(keyval) == 2:
                sanitized_keyvals.append((keyval[0], self.sanitize(*keyval)))
            else:
                # Not a simple k=v pair; leave the pieces untouched.
                sanitized_keyvals.append(keyval)

        return delimiter.join('='.join(keyval) for keyval in sanitized_keyvals)
class SanitizePasswordsProcessor(SanitizeKeysProcessor):
    """
    Asterisk out things that look like passwords, credit card numbers,
    and API keys in frames, http, and basic extra data.
    """
    # Key substrings considered sensitive regardless of client configuration.
    KEYS = frozenset([
        'password',
        'secret',
        'passwd',
        'authorization',
        'api_key',
        'apikey',
        'sentry_dsn',
        'access_token',
    ])
    # Matches card-number-like values: 13-16 digits, optionally spaced/dashed.
    VALUES_RE = re.compile(r'^(?:\d[ -]*?){13,16}$')

    @property
    def sanitize_keys(self):
        # Fixed set; does not consult the client like the parent class does.
        return self.KEYS

    @property
    def FIELDS(self):
        ## deprecated alias kept for backward compatibility
        warnings.warn(
            "`SanitizePasswordsProcessor.Fields` has been deprecated. Use "
            "`SanitizePasswordsProcessor.KEYS` or `SanitizePasswordsProcessor.sanitize_keys` "
            "instead",
            DeprecationWarning,
        )
        return self.KEYS

    def sanitize(self, item, value):
        """Mask by key (parent behavior), then also mask card-like values."""
        value = super(SanitizePasswordsProcessor, self).sanitize(item, value)
        if isinstance(value, string_types) and self.VALUES_RE.match(value):
            return self.MASK
        return value
|
smartczm/python-learn | refs/heads/master | Old-day01-10/s13-day12/day12/homework/rpc/demo2/conf/__init__.py | 16 | #!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
# Author: Lang |
varunagrawal/azure-services | refs/heads/master | varunagrawal/VarunWeb/env/Lib/site-packages/django/views/decorators/debug.py | 173 | import functools
from django.http import HttpRequest
def sensitive_variables(*variables):
    """
    Indicates which variables used in the decorated function are sensitive, so
    that those variables can later be treated in a special way, for example
    by hiding them when logging unhandled exceptions.

    Two forms are accepted:

    * with specified variable names::

        @sensitive_variables('user', 'password', 'credit_card')
        def my_function(user):
            password = user.pass_word
            ...

    * without any names, in which case every local variable is considered
      sensitive::

        @sensitive_variables()
        def my_function():
            ...
    """
    def decorator(func):
        @functools.wraps(func)
        def sensitive_variables_wrapper(*func_args, **func_kwargs):
            # The exception-reporting machinery finds this attribute on the
            # wrapper; '__ALL__' marks every local variable as sensitive.
            sensitive_variables_wrapper.sensitive_variables = (
                variables if variables else '__ALL__'
            )
            return func(*func_args, **func_kwargs)
        return sensitive_variables_wrapper
    return decorator
def sensitive_post_parameters(*parameters):
    """
    Indicates which POST parameters used in the decorated view are sensitive,
    so that those parameters can later be treated in a special way, for example
    by hiding them when logging unhandled exceptions.

    Two forms are accepted:

    * with specified parameters::

        @sensitive_post_parameters('password', 'credit_card')
        def my_view(request):
            pw = request.POST['password']
            ...

    * without any parameters, in which case every POST parameter is
      considered sensitive::

        @sensitive_post_parameters()
        def my_view(request)
            ...
    """
    def decorator(view):
        @functools.wraps(view)
        def sensitive_post_parameters_wrapper(request, *args, **kwargs):
            assert isinstance(request, HttpRequest), (
                "sensitive_post_parameters didn't receive an HttpRequest. If you "
                "are decorating a classmethod, be sure to use @method_decorator."
            )
            # '__ALL__' marks every POST parameter as sensitive.
            request.sensitive_post_parameters = (
                parameters if parameters else '__ALL__'
            )
            return view(request, *args, **kwargs)
        return sensitive_post_parameters_wrapper
    return decorator
|
DHLabs/keep | refs/heads/master | keep_backend/backend/urls.py | 1 | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.base import TemplateView
from api.data import DataResource
from api.filters import FilterSetResource
from api.repo import RepoResource
from api.study import StudyResource
from api.user import UserResource
from api.viz import VizResource
from api.vocab import VocabResource
# Register resources to make API available
from tastypie.api import Api
v1_api = Api( api_name='v1' )
v1_api.register( DataResource() )
v1_api.register( FilterSetResource() )
v1_api.register( RepoResource() )
v1_api.register( StudyResource() )
v1_api.register( UserResource() )
v1_api.register( VizResource() )
v1_api.register( VocabResource() )
# User views URLs
urlpatterns = patterns( 'backend.views',
# Basic index page
url( regex=r'^$',
view='home',
name='home' ),
# User settings
url( regex=r'^settings/$',
view='settings',
name='settings' ),
# User dashboard
url( regex=r'^(?P<username>(?!static|media)\w+)/$',
view='user_dashboard',
name='user_dashboard' ),
)
# Static pages
urlpatterns += url(r'^features',
TemplateView.as_view(template_name='features.html'),
name='features'),
if settings.DEBUG:
admin.autodiscover()
urlpatterns += patterns( '',
( r'^keep-admin/', include( admin.site.urls )),
)
# Add API urls
urlpatterns += patterns( '', url(r'^api/', include( v1_api.urls ) ) )
# Autocomplete url
urlpatterns += patterns( '', url(regex=r'^api/autocomplete/(?P<endpoint>.*)/$', view='api.autocomplete.autocomplete', name='autocomplete' ) )
# Account registration / login
urlpatterns += patterns( '', url( r'^accounts/',
include( 'backend.registration_urls' ) ) )
urlpatterns += patterns( '', url( r'', include( 'organizations.urls' ) ) )
urlpatterns += patterns( '', url( r'', include( 'repos.urls' ) ) )
# Handle the ODKCollect APIs
urlpatterns += patterns( '', url( r'', include( 'openrosa.urls' ) ) )
# Handle static files on local dev machine
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static( settings.MEDIA_URL, document_root=settings.MEDIA_ROOT )
|
huntxu/fuel-web | refs/heads/master | nailgun/nailgun/test/integration/test_node_nic_handler.py | 2 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from copy import deepcopy
from oslo_serialization import jsonutils
import six
from nailgun import consts
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
class TestHandlers(BaseIntegrationTest):
    def test_get_handler_with_wrong_nodeid(self):
        """GET on NodeNICsHandler for a nonexistent node id returns 404."""
        resp = self.app.get(
            reverse('NodeNICsHandler', kwargs={'node_id': 1}),
            expect_errors=True,
            headers=self.default_headers)
        self.assertEqual(resp.status_code, 404)
    def test_get_handler_with_invalid_data(self):
        """Agent PUT with malformed ``interfaces`` metadata is rejected (400)
        and the node's NICs listing stays empty."""
        meta = self.env.default_metadata()
        meta["interfaces"] = []
        node = self.env.create_node(api=True, meta=meta)

        # Both None and a dict are invalid: 'interfaces' must be a list.
        meta_list = [
            {'interfaces': None},
            {'interfaces': {}}
        ]
        for nic_meta in meta_list:
            meta = self.env.default_metadata()
            meta.update(nic_meta)
            node_data = {'mac': node['mac'], 'meta': meta}
            resp = self.app.put(
                reverse('NodeAgentHandler'),
                jsonutils.dumps(node_data),
                expect_errors=True,
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 400)
            # The invalid update must not have created any NICs.
            resp = self.app.get(
                reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
                headers=self.default_headers
            )
            self.assertEqual(resp.status_code, 200)
            self.assertEqual(resp.json_body, [])
def test_get_handler_with_incompleted_iface_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': '', 'mac': '00:00:00:00:00:00'}]},
{'interfaces': [{'mac': '00:00:00:00:00:00'}]},
{'interfaces': [{'name': 'eth0'}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers
)
self.assertEqual(resp.json_body, [])
def test_get_handler_with_invalid_speed_data(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
meta_clean_list = [
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': -100}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': 10.0}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': '100'}]},
{'interfaces': [{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'current_speed': 10.0}]}
]
for nic_meta in meta_clean_list:
meta = self.env.default_metadata()
meta.update(nic_meta)
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
expect_errors=True,
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node['id']}),
headers=self.default_headers
)
ifaces = resp.json_body['meta']['interfaces']
self.assertEqual(
ifaces,
[
{'name': 'eth0', 'mac': '00:00:00:00:00:00',
'max_speed': None, 'current_speed': None}
]
)
def test_get_handler_without_NICs(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json_body, [])
@patch.dict('nailgun.api.v1.handlers.version.settings.VERSION', {
'release': '5.0'})
def test_get_handler_with_NICs_before_61(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1},
{'name': 'eth1', 'mac': self.env.generate_random_mac(),
'current_speed': 1, 'max_speed': 1}])
self.env.create_node(api=True, meta=meta)
node_db = self.env.nodes[0]
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node_db.id}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertItemsEqual(
map(lambda i: i['id'], resp.json_body),
map(lambda i: i.id, node_db.interfaces)
)
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
resp.json_body
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
self.assertNotIn('interface_properties', resp_nic)
def test_nic_mac_swap(self):
mac_eth0 = '00:11:22:dd:ee:ff'
mac_eth1 = 'aa:bb:cc:33:44:55'
eth0 = {
'name': 'eth0',
'mac': mac_eth0,
'current_speed': 1,
'state': 'up',
'pxe': True
}
eth1 = {
'name': 'eth1',
'mac': mac_eth1,
'current_speed': 1,
'state': 'up',
'pxe': False
}
# prepare metadata with our interfaces
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [eth0, eth1])
# NOTE(prmtl) hack to have all mac set as we want
# crete_node() will generate random mac for 1st iface
# if we will not set it like that
node_mac = meta['interfaces'][0]['mac']
node = self.env.create_node(api=True, meta=meta, mac=node_mac)
self.env.create_cluster(api=True, nodes=[node['id']])
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
original_nic_info = resp.json
# swap macs, make them uppercase to check that we handle that correctly
eth0['mac'], eth1['mac'] = eth1['mac'].upper(), eth0['mac'].upper()
# update nodes with swapped macs
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [eth0, eth1])
node_data = {'mac': node['mac'], 'meta': new_meta}
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
# check that networks are assigned to the same interfaces
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
updated_nic_info = resp.json
for orig_iface in original_nic_info:
updated_iface = next(
iface for iface in updated_nic_info
if iface['mac'] == orig_iface['mac'])
self.assertEqual(
orig_iface['assigned_networks'],
orig_iface['assigned_networks'])
# nic names were swapped
self.assertNotEqual(orig_iface['name'], updated_iface['name'])
def test_NIC_updates_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [
{'name': 'new_nic', 'mac': '00:00:00:00:00:00',
'current_speed': 10, 'max_speed': 10, 'state': 'down'}])
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_nic = resp.json_body[0]
nic = new_meta['interfaces'][0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'], nic['current_speed'])
self.assertEqual(resp_nic['max_speed'], nic['max_speed'])
self.assertEqual(resp_nic['state'], nic['state'])
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_NIC_offloading_modes(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [
{'name': 'new_nic',
'mac': '00:00:00:00:00:00',
'offloading_modes': [
{
'name': 'mode_1',
'state': True,
"sub": []
},
{
'name': 'mode_2',
'state': False,
"sub": []
},
{
'name': 'mode_3',
'state': None,
"sub": []
}
]}])
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_nic = resp.json_body[0]
nic = new_meta['interfaces'][0]
self.assertEqual(resp_nic['offloading_modes'], nic['offloading_modes'])
def test_NIC_change_offloading_modes(self):
meta = self.env.default_metadata()
meta["interfaces"] = []
node = self.env.create_node(api=True, meta=meta)
new_meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(new_meta, [
{'name': 'new_nic',
'mac': '00:00:00:00:00:00',
'offloading_modes': [
{
'name': 'mode_1',
'state': None,
"sub": []
},
{
'name': 'mode_2',
'state': None,
"sub": []
},
{
'name': 'mode_3',
'state': None,
"sub": []
}
]}])
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_nic = resp.json_body[0]
nic = new_meta['interfaces'][0]
self.assertEqual(resp_nic['offloading_modes'], nic['offloading_modes'])
resp = self.app.get(
reverse('NodeCollectionHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_node = resp.json_body[0]
new_nic = {
'name': 'new_nic',
'mac': '00:00:00:00:00:00',
'offloading_modes': [
{
'name': 'mode_1',
'state': True,
"sub": []
},
{
'name': 'mode_2',
'state': False,
"sub": []
},
{
'name': 'mode_3',
'state': None,
"sub": []
}
]
}
self.env.set_interfaces_in_meta(resp_node["meta"], [
new_nic])
resp_node.pop('group_id')
resp = self.app.put(
reverse('NodeCollectionHandler'),
jsonutils.dumps([resp_node]),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), 1)
resp_nic = resp.json_body[0]
self.assertEqual(
resp_nic['offloading_modes'],
new_nic['offloading_modes'])
def test_NIC_locking_on_update_by_agent(self):
lock_vs_status = {
consts.NODE_STATUSES.discover: False,
consts.NODE_STATUSES.error: False,
consts.NODE_STATUSES.provisioning: True,
consts.NODE_STATUSES.provisioned: False,
consts.NODE_STATUSES.deploying: True,
consts.NODE_STATUSES.ready: True,
consts.NODE_STATUSES.removing: True}
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'state': 'up', 'pxe': True}])
self.env.create_node(api=True, meta=meta)
new_meta = deepcopy(meta)
node = self.env.nodes[0]
for status, lock in six.iteritems(lock_vs_status):
node.status = status
self.db.flush()
new_meta['interfaces'][0]['current_speed'] += 1
node_data = {'mac': node['mac'], 'meta': new_meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp_nic = resp.json_body[0]
new_speed = new_meta['interfaces'][0]['current_speed']
old_speed = meta['interfaces'][0]['current_speed']
self.assertEqual(resp_nic['current_speed'],
old_speed if lock else new_speed)
meta['interfaces'][0]['current_speed'] = resp_nic['current_speed']
@patch.dict('nailgun.api.v1.handlers.version.settings.VERSION', {
'release': '6.1'})
def test_interface_properties_after_update_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'pxe': True, 'state': 'up'}])
self.env.create(
nodes_kwargs=[
{"api": True, 'meta': meta}
]
)
node = self.env.nodes[0]
node_data = {'mac': node['mac'], 'meta': meta}
# check default interface_properties values
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
nic = resp.json_body[0]
self.assertEqual(nic['interface_properties'],
{'disable_offloading': False,
'mtu': None})
# change mtu
nic['interface_properties']['mtu'] = 1500
nodes_list = [{'id': node['id'], 'interfaces': [nic]}]
resp_put = self.app.put(
reverse('NodeCollectionNICsHandler'),
jsonutils.dumps(nodes_list),
headers=self.default_headers
)
self.assertEqual(resp_put.status_code, 200)
# update NICs by agent (no interface_properties values provided)
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
# check interface_properties values were not reset to default
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp_nic = resp.json_body[0]
self.assertEqual(resp_nic['interface_properties'],
{'disable_offloading': False,
'mtu': 1500})
def test_nic_adds_by_agent(self):
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': '00:00:00:00:00:00', 'current_speed': 1,
'pxe': True, 'state': 'up'}])
node = self.env.create_node(api=True, meta=meta)
meta['interfaces'].append({
'name': 'new_nic', 'mac': '00:00:00:00:00:01'})
node_data = {'mac': node['mac'], 'meta': meta}
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.json_body), len(meta['interfaces']))
for nic in meta['interfaces']:
filtered_nics = filter(
lambda i: i['mac'] == nic['mac'],
resp.json_body
)
resp_nic = filtered_nics[0]
self.assertEqual(resp_nic['mac'], nic['mac'])
self.assertEqual(resp_nic['current_speed'],
nic.get('current_speed'))
self.assertEqual(resp_nic['max_speed'], nic.get('max_speed'))
self.assertEqual(resp_nic['state'], nic.get('state'))
for conn in ('assigned_networks', ):
self.assertEqual(resp_nic[conn], [])
def test_ignore_NIC_id_in_meta(self):
fake_id = 'some_data'
meta = self.env.default_metadata()
self.env.set_interfaces_in_meta(meta, [
{'id': fake_id, 'name': 'eth0', 'mac': '12345'}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertNotEquals(resp.json_body[0]['id'], fake_id)
def test_mac_address_should_be_in_lower_case(self):
meta = self.env.default_metadata()
new_mac = 'AA:BB:CC:DD:11:22'
self.env.set_interfaces_in_meta(meta, [
{'name': 'eth0', 'mac': new_mac}])
node = self.env.create_node(api=True, meta=meta)
resp = self.app.get(
reverse('NodeNICsHandler', kwargs={'node_id': node['id']}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.assertNotEquals(resp.json_body[0]['mac'], new_mac.lower())
def test_remove_assigned_interface(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return resp.json_body
self.env.create(nodes_kwargs=[{'api': True}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
# create node and check it availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# remove all interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
ifaces = list(nodes_data[0]['meta']['interfaces'])
nodes_data[0]['meta']['interfaces'] = \
[i for i in ifaces if i['name'] == adm_eth.name]
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible for api
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertEqual(len(nodes_data[0]['meta']['interfaces']), 1)
# restore removed interfaces
nodes_data[0]['meta']['interfaces'] = ifaces
self.app.put(
reverse(
'NodeAgentHandler',
),
jsonutils.dumps({
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}),
headers=self.default_headers,
)
# check node availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
self.assertItemsEqual(nodes_data[0]['meta']['interfaces'], ifaces)
def test_change_mac_of_assigned_nics(self):
def get_nodes():
resp = self.app.get(
reverse('NodeCollectionHandler',
kwargs={'cluster_id': self.env.clusters[0].id}),
headers=self.default_headers,
)
return resp.json_body
meta = self.env.default_metadata()
meta["interfaces"] = [
{'name': 'eth0', 'mac': self.env.generate_random_mac(),
'pxe': True},
{'name': 'eth1', 'mac': self.env.generate_random_mac()},
{'name': 'eth2', 'mac': self.env.generate_random_mac()},
{'name': 'eth3', 'mac': self.env.generate_random_mac()},
{'name': 'eth4', 'mac': self.env.generate_random_mac()},
]
self.env.create(nodes_kwargs=[{'api': True, 'meta': meta}])
# check all possible handlers
for handler in ('NodeAgentHandler',
'NodeHandler',
'NodeCollectionHandler'):
# create node and check it availability
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
# change mac address of interfaces except admin one
adm_eth = self.env.network_manager._get_interface_by_network_name(
nodes_data[0]['id'], 'fuelweb_admin')
for iface in nodes_data[0]['meta']['interfaces']:
if iface['name'] != adm_eth.name:
iface['mac'] = self.env.generate_random_mac()
# prepare put request
data = {
'id': nodes_data[0]['id'],
'meta': nodes_data[0]['meta'],
}
if handler in ('NodeCollectionHandler', ):
data = [data]
if handler in ('NodeHandler', ):
endpoint = reverse(handler, kwargs={'obj_id': data['id']})
else:
endpoint = reverse(handler)
self.app.put(
endpoint,
jsonutils.dumps(data),
headers=self.default_headers,
)
# check the node is visible for api
nodes_data = get_nodes()
self.assertEqual(len(nodes_data), 1)
def test_pxe_for_admin_nws_restriction(self):
meta = self.env.default_metadata()
# We are using reverse ordered by iface name list
# for reproducing bug #1474330
meta['interfaces'] = [
{'name': 'eth1', 'mac': self.env.generate_random_mac(),
'pxe': False},
{'name': 'eth0', 'mac': self.env.generate_random_mac(),
'pxe': False},
]
self.env.create(nodes_kwargs=[{'api': False, 'meta': meta}])
cluster = self.env.clusters[0]
node = cluster.nodes[0]
# Processing data through NodeHandler
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
headers=self.default_headers,
)
data = resp.json_body
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': data['id']}),
jsonutils.dumps(data),
headers=self.default_headers,
)
self.assertEqual(resp.status_code, 200)
# Processing data through NICsHander
resp = self.app.get(
reverse("NodeNICsHandler", kwargs={"node_id": node.id}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
data = resp.json_body
resp = self.app.put(
reverse("NodeNICsHandler", kwargs={"node_id": node.id}),
jsonutils.dumps(data),
headers=self.default_headers,
)
self.assertEqual(resp.status_code, 200)
|
Vishluck/sympy | refs/heads/master | sympy/physics/unitsystems/systems/mksa.py | 77 | # -*- coding: utf-8 -*-
"""
MKS unit system.
MKS stands for "meter, kilogram, second, ampere".
"""
from __future__ import division
from sympy import pi
from sympy.physics.unitsystems.dimensions import Dimension
from sympy.physics.unitsystems.units import Unit, Constant
from sympy.physics.unitsystems.prefixes import PREFIXES, prefix_unit
from sympy.physics.unitsystems.systems.mks import mks_dim, mks
# base dimensions
current = Dimension(name='current', symbol='I', current=1)

# derived dimensions
voltage = Dimension(name='voltage', symbol='U', mass=1, length=2, current=-1,
                    time=-3)
impedance = Dimension(name='impedance', symbol='Z', mass=1, length=2,
                      current=-2, time=-3)
conductance = Dimension(name='conductance', symbol='G', mass=-1, length=-2,
                        current=2, time=3)
capacitance = Dimension(name='capacitance', mass=-1, length=-2, current=2,
                        time=4)
inductance = Dimension(name='inductance', mass=1, length=2, current=-2, time=-2)
charge = Dimension(name='charge', symbol='Q', current=1, time=1)
# bugfix: the two dimensions below were both misnamed 'charge'
# (copy-paste error); they are magnetic flux density (tesla) and
# magnetic flux (weber) respectively
magnetic_density = Dimension(name='magnetic_density', symbol='B', mass=1,
                             current=-1, time=-2)
magnetic_flux = Dimension(name='magnetic_flux', length=2, mass=1, current=-1,
                          time=-2)

dims = (voltage, impedance, conductance, capacitance, inductance, charge,
        magnetic_density, magnetic_flux)

# dimension system: MKS extended with electrical current as a base dimension
mksa_dim = mks_dim.extend(base=(current,), dims=dims, name='MKSA')

# base units
A = Unit(current, abbrev='A')

# derived units
# NOTE(review): the 10**3 / 10**-3 factors presumably compensate for the
# gram-based mass scale of the underlying system -- confirm against mks
V = Unit(voltage, factor=10**3, abbrev='V')
ohm = Unit(impedance, factor=10**3, abbrev='ohm')
# siemens
S = Unit(conductance, factor=10**-3, abbrev='S')
# farad
F = Unit(capacitance, factor=10**-3, abbrev='F')
# henry
H = Unit(inductance, factor=10**3, abbrev='H')
# coulomb
C = Unit(charge, abbrev='C')
# tesla
T = Unit(magnetic_density, abbrev='T')
# weber
Wb = Unit(magnetic_flux, abbrev='Wb')

# constants
# Wave impedance of free space: Z_0 = 119.9169832 * pi ohm (~376.73 ohm)
Z0 = Constant(impedance, factor=119.9169832*pi, abbrev='Z_0')

units = [A, V, ohm, S, F, H, C, T, Wb]
all_units = []
# generate all prefixed variants (mV, kohm, ...) of every unit
for u in units:
    all_units.extend(prefix_unit(u, PREFIXES))
all_units.extend([Z0])

# unit system: MKS extended with the ampere and the derived units above
mksa = mks.extend(base=(A,), units=all_units, name='MKSA')
|
davidbkemp/node-gyp | refs/heads/master | gyp/test/copies/gyptest-all.py | 264 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies using an explicit build target of 'all'.
"""
import TestGyp
# Set up the test harness and generate the project from copies.gyp.
test = TestGyp.TestGyp()
test.run_gyp('copies.gyp', chdir='src')
# Move the generated tree to verify the build is relocatable.
test.relocate('src', 'relocate/src')
test.build('copies.gyp', test.ALL, chdir='relocate/src')
# file1 is copied into the (relocated) source tree itself ...
test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
# ... while the remaining copies land in the build output directory.
test.built_file_must_match('copies-out/file2',
                           'file2 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file3',
                           'file3 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/file4',
                           'file4 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/directory/subdir/file5',
                           'file5 contents\n',
                           chdir='relocate/src')
test.built_file_must_match('copies-out/subdir/file6',
                           'file6 contents\n',
                           chdir='relocate/src')
test.pass_test()
|
gauravsitlani/programming | refs/heads/master | machine_learning/linear_regression/linear_regression.py | 1 | # manual algebrical method
import numpy as np
import sys

print(sys.version)

# Ordinary least squares via the normal equations: (X^T X) b = X^T y.
# The trailing column of ones models the intercept term.
x = np.array([[1, 2, 1], [2, 3, 1], [3, 4, 1], [6, 8, 1], [14, 6, 1]])
y = [4, 7, 11, 21, 25]
x_tr = np.array(x).T
# Solve the linear system directly instead of forming the explicit matrix
# inverse: np.linalg.solve is faster and numerically more stable than
# inv()-then-multiply, and gives the same coefficients here.
b = np.linalg.solve(np.dot(x_tr, x), np.dot(x_tr, y))
print(b)

# sklearn method: same fit on the two feature columns; LinearRegression
# handles the intercept internally, so the ones column is dropped.
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
x = np.array([[1, 2], [2, 3], [3, 4], [6, 8], [14, 6]])
lr.fit(x, y)
print(lr.coef_, lr.intercept_)
andrew-aladev/samba-talloc-debug | refs/heads/master | source4/scripting/bin/w32err_code.py | 56 | #!/usr/bin/env python
# Unix SMB/CIFS implementation.
# Copyright (C) Kamen Mazdrashki <kamen.mazdrashki@postpath.com> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Import generete werror.h/doserr.c files from WSPP HTML"""
import re
import os
import sys
import urllib
import pprint
from xml.dom import minidom
from optparse import OptionParser, OptionGroup
_wspp_werror_url = 'http://msdn.microsoft.com/en-us/library/cc231199%28PROT.10%29.aspx'
class WerrorHtmlParser(object):
"""
Parses HTML from WSPP documentation generating dictionary of
dictionaries with following keys:
- "err_hex" - hex number (as string)
- "err_name" - error name
- "err_desc" - error long description
For the key of returned dictionary err_hex is used,
i.e. "hex-error-code-str" => {error dictionary object}
"""
ERROR_PREFIX = ['ERROR_', 'NERR_', 'FRS_', 'RPC_', 'EPT_', 'OR_', 'WAIT_TIMEOUT']
ERROR_REPLACE = ['ERROR_']
def __init__(self, opt):
self.opt = opt
self._errors_skipped = []
pass
def _is_error_code_name(self, err_name):
for pref in self.ERROR_PREFIX:
if err_name.startswith(pref):
return True
return False
def _make_werr_name(self, err_name):
err_name = err_name.upper()
for pref in self.ERROR_REPLACE:
if err_name.startswith(pref):
return err_name.replace(pref, 'WERR_', 1)
return 'WERR_' + err_name
def parse_url(self, url):
errors = {}
html = self._load_url(url)
# let minidom to parse the tree, should be:
# table -> tr -> td
# p -> [hex code, br, error code]
# p -> [description]
table_node = minidom.parseString(html)
for row_node in table_node.getElementsByTagName("tr"):
# verify we got right number of td elements
td_nodes = row_node.getElementsByTagName('td')
if len(td_nodes) != 2:
continue
# now get the real data
p_nodes = row_node.getElementsByTagName('p')
if len(p_nodes) != 2: continue
if len(p_nodes[0].childNodes) != 3: continue
if len(p_nodes[1].childNodes) != 1: continue
err_hex = str(p_nodes[0].childNodes[0].nodeValue)
err_name = str(p_nodes[0].childNodes[2].nodeValue)
err_desc = p_nodes[1].childNodes[0].nodeValue.encode('utf-8')
err_desc = err_desc.replace('"', '\\"').replace("\'", "\\'")
# do some checking
if not err_hex.startswith('0x'): continue
if not self._is_error_code_name(err_name):
self._errors_skipped.append("%s - %s - %d" % (err_name, err_hex, int(err_hex, 16)))
continue
# create entry
err_name = self._make_werr_name(err_name)
err_def = {'err_hex': err_hex,
'err_name': err_name,
'err_desc': err_desc,
'code': int(err_hex, 16)}
errors[err_def['code']] = err_def
# print skipped errors
if self.opt.print_skipped and len(self._errors_skipped):
print "\nErrors skipped during HTML parsing:"
pprint.pprint(self._errors_skipped)
print "\n"
return errors
def _load_url(self, url):
html_str = ""
try:
fp = urllib.urlopen(url)
for line in fp:
html_str += line.strip()
fp.close()
except IOError, e:
print "error loading url: " + e.strerror
pass
# currently ERROR codes are rendered as table
# locate table chunk with ERROR_SUCCESS
html = [x for x in html_str.split('<table ') if "ERROR_SUCCESS" in x]
html = '<table ' + html[0]
pos = html.find('</table>')
if pos == -1:
return '';
html = html[:pos] + '</table>'
# html clean up
html = re.sub(r'<a[^>]*>(.*?)</a>', r'\1', html)
return html
class WerrorGenerator(object):
"""
provides methods to generate parts of werror.h and doserr.c files
"""
FNAME_WERRORS = 'w32errors.lst'
FNAME_WERROR_DEFS = 'werror_defs.h'
FNAME_DOSERR_DEFS = 'doserr_defs.c'
FNAME_DOSERR_DESC = 'doserr_desc.c'
def __init__(self, opt):
self.opt = opt
self._out_dir = opt.out_dir
pass
def _open_out_file(self, fname):
fname = os.path.join(self._out_dir, fname)
return open(fname, "w")
def _gen_werrors_list(self, errors):
"""uses 'errors' dictionary to display list of Win32 Errors"""
fp = self._open_out_file(self.FNAME_WERRORS)
for err_code in sorted(errors.keys()):
err_name = errors[err_code]['err_name']
fp.write(err_name)
fp.write("\n")
fp.close()
def _gen_werror_defs(self, errors):
"""uses 'errors' dictionary to generate werror.h file"""
fp = self._open_out_file(self.FNAME_WERROR_DEFS)
for err_code in sorted(errors.keys()):
err_name = errors[err_code]['err_name']
err_hex = errors[err_code]['err_hex']
fp.write('#define %s\tW_ERROR(%s)' % (err_name, err_hex))
fp.write("\n")
fp.close()
def _gen_doserr_defs(self, errors):
"""uses 'errors' dictionary to generate defines in doserr.c file"""
fp = self._open_out_file(self.FNAME_DOSERR_DEFS)
for err_code in sorted(errors.keys()):
err_name = errors[err_code]['err_name']
fp.write('\t{ "%s", %s },' % (err_name, err_name))
fp.write("\n")
fp.close()
def _gen_doserr_descriptions(self, errors):
"""uses 'errors' dictionary to generate descriptions in doserr.c file"""
fp = self._open_out_file(self.FNAME_DOSERR_DESC)
for err_code in sorted(errors.keys()):
err_name = errors[err_code]['err_name']
fp.write('\t{ %s, "%s" },' % (err_name, errors[err_code]['err_desc']))
fp.write("\n")
fp.close()
def _lookup_error_by_name(self, err_name, defined_errors):
for err in defined_errors.itervalues():
if err['err_name'] == err_name:
return err
return None
def _filter_errors(self, errors, defined_errors):
"""
returns tuple (new_erros, diff_code_errors, diff_name_errors)
new_errors - dictionary of errors not in defined_errors
diff_code_errors - list of errors found in defined_errors
but with different value
diff_name_errors - list of errors found with same code in
defined_errors, but with different name
Most critical is diff_code_errors list to be empty!
"""
new_errors = {}
diff_code_errors = []
diff_name_errors = []
for err_def in errors.itervalues():
add_error = True
# try get defined error by code
if defined_errors.has_key(err_def['code']):
old_err = defined_errors[err_def['code']]
if err_def['err_name'] != old_err['err_name']:
warning = {'msg': 'New and Old errors has different error names',
'err_new': err_def,
'err_old': old_err}
diff_name_errors.append(warning)
# sanity check for errors with same name but different values
old_err = self._lookup_error_by_name(err_def['err_name'], defined_errors)
if old_err:
if err_def['code'] != old_err['code']:
warning = {'msg': 'New and Old error defs has different error value',
'err_new': err_def,
'err_old': old_err}
diff_code_errors.append(warning)
# exclude error already defined with same name
add_error = False
# do add the error in new_errors if everything is fine
if add_error:
new_errors[err_def['code']] = err_def
pass
return (new_errors, diff_code_errors, diff_name_errors)
def generate(self, errors):
# load already defined error codes
werr_parser = WerrorParser(self.opt)
(defined_errors,
no_value_errors) = werr_parser.load_err_codes(self.opt.werror_file)
if not defined_errors:
print "\nUnable to load existing errors file: %s" % self.opt.werror_file
sys.exit(1)
if self.opt.verbose and len(no_value_errors):
print "\nWarning: there are errors defines using macro value:"
pprint.pprint(no_value_errors)
print ""
# filter generated error codes
(new_errors,
diff_code_errors,
diff_name_errors) = self._filter_errors(errors, defined_errors)
if diff_code_errors:
print("\nFound %d errors with same names but different error values! Aborting."
% len(diff_code_errors))
pprint.pprint(diff_code_errors)
sys.exit(2)
if diff_name_errors:
print("\nFound %d errors with same values but different names (should be normal)"
% len(diff_name_errors))
pprint.pprint(diff_name_errors)
# finally generate output files
self._gen_werror_defs(new_errors)
self._gen_doserr_defs(new_errors)
self._gen_werrors_list(errors)
self._gen_doserr_descriptions(errors)
pass
class WerrorParser(object):
    """
    Parses errors defined in werror.h file

    Used to load the set of already-defined W_ERROR codes so that newly
    scraped codes can be checked against them before generation.
    """
    def __init__(self, opt):
        # opt: parsed command-line options; stored for symmetry with the
        # other classes but not read by the parsing methods themselves
        self.opt = opt
        pass
    def _parse_werror_line(self, line):
        # Match lines of the form: "#define NAME   W_ERROR(value)".
        # Returns an error dict, or None if the line does not define one.
        m = re.match('#define[ \t]*(.*?)[ \t]*W_ERROR\((.*?)\)', line)
        if not m or (len(m.groups()) != 2):
            return None
        if len(m.group(1)) == 0:
            return None
        # The value may be hex ("0x...") or decimal; anything else
        # (e.g. another macro's name) is recorded in err_no_values
        # and the line is skipped.
        if str(m.group(2)).startswith('0x'):
            err_code = int(m.group(2), 16)
        elif m.group(2).isdigit():
            err_code = int(m.group(2))
        else:
            self.err_no_values.append(line)
            return None
        return {'err_name': str(m.group(1)),
                'err_hex': "0x%08X" % err_code,
                'code': err_code}
        pass
    def load_err_codes(self, fname):
        """
        Returns tuple of:
            dictionary keyed by the integer error code, mapping to
            {'err_name', 'err_hex', 'code'} as produced by
            _parse_werror_line (the original docstring wrongly said
            the keys were hex strings)
            list of raw lines whose error value could not be parsed
            (e.g. values defined via another macro)
        """
        # reset internal variables
        self.err_no_values = []
        err_codes = {}
        fp = open(fname)
        for line in fp.readlines():
            err_def = self._parse_werror_line(line)
            if err_def:
                # last definition wins if a code appears more than once
                err_codes[err_def['code']] = err_def
        fp.close();
        return (err_codes, self.err_no_values)
def _generate_files(opt):
    """Parse the error-code HTML page and emit the generated source files."""
    html_parser = WerrorHtmlParser(opt)
    err_list = html_parser.parse_url(opt.url)
    generator = WerrorGenerator(opt)
    generator.generate(err_list)
if __name__ == '__main__':
    _cur_dir = os.path.abspath(os.path.dirname(__file__))

    # Build the command-line interface.
    parser = OptionParser(usage="usage: %prog [options]", version="%prog 0.3")
    main_opts = OptionGroup(parser, "Main options")
    main_opts.add_option("--url", dest="url",
                         default=_wspp_werror_url,
                         help="url for w32 error codes html - may be local file")
    main_opts.add_option("--out", dest="out_dir",
                         default=_cur_dir,
                         help="output dir for generated files")
    main_opts.add_option("--werror", dest="werror_file",
                         default=os.path.join(_cur_dir, 'werror.h'),
                         help="path to werror.h file")
    main_opts.add_option("--print_skipped",
                         action="store_true", dest="print_skipped", default=False,
                         help="print errors skipped during HTML parsing")
    main_opts.add_option("-q", "--quiet",
                         action="store_false", dest="verbose", default=True,
                         help="don't print warnings to stdout")
    parser.add_option_group(main_opts)
    (options, args) = parser.parse_args()

    # Derive the output file paths used internally by the generators.
    options.err_defs_file = os.path.join(options.out_dir, WerrorGenerator.FNAME_WERROR_DEFS)
    options.dos_defs_file = os.path.join(options.out_dir, WerrorGenerator.FNAME_DOSERR_DEFS)
    options.dos_desc_file = os.path.join(options.out_dir, WerrorGenerator.FNAME_DOSERR_DESC)

    _generate_files(options)
|
KaranToor/MA450 | refs/heads/master | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/integration/s3/test_cert_verification.py | 126 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Check that all of the certs on S3 endpoints validate.
"""
import unittest
from tests.integration import ServiceCertVerificationTest
import boto.s3
class S3CertVerificationTest(unittest.TestCase, ServiceCertVerificationTest):
    # Marker attribute - presumably read by the shared
    # ServiceCertVerificationTest harness to identify the service; confirm
    # against tests/integration.
    s3 = True
    # One connection per S3 region endpoint will have its SSL certificate
    # checked by the shared harness.
    regions = boto.s3.regions()

    def sample_service_call(self, conn):
        """Issue a simple authenticated request on *conn* (listing buckets)."""
        conn.get_all_buckets()
|
shenyy/lily2-gem5 | refs/heads/master | src/arch/x86/isa/insts/simd128/floating_point/logical/exclusive_or.py | 91 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop XORPD_XMM_XMM {
mxor xmml, xmml, xmmlm
mxor xmmh, xmmh, xmmhm
};
def macroop XORPD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mxor xmml, xmml, ufp1
mxor xmmh, xmmh, ufp2
};
def macroop XORPD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mxor xmml, xmml, ufp1
mxor xmmh, xmmh, ufp2
};
def macroop XORPS_XMM_XMM {
mxor xmml, xmml, xmmlm
mxor xmmh, xmmh, xmmhm
};
def macroop XORPS_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
ldfp ufp2, seg, sib, "DISPLACEMENT + 8", dataSize=8
mxor xmml, xmml, ufp1
mxor xmmh, xmmh, ufp2
};
def macroop XORPS_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
ldfp ufp2, seg, riprel, "DISPLACEMENT + 8", dataSize=8
mxor xmml, xmml, ufp1
mxor xmmh, xmmh, ufp2
};
'''
|
moyeah/wpc | refs/heads/master | windcells.py | 1 | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gobject
import gtk
from math import *
import wpcutils as wpc
# Column indices of the wind table model (speed, frequency, editable flag).
(
    COLUMN_SPEED,
    COLUMN_WIND,
    COLUMN_EDITABLE
) = range(3)

# Module-level table data shared with WindCells: each row is
# [speed "u" as str, wind frequency "f(u)" as str, editable flag].
winds = [["0.0", "0.0", True]]
class WindButtonBox(gtk.HButtonBox):
    """Right-aligned Add/Remove button row wired to a WindCells table."""

    def __init__(self, box, wc):
        gtk.HButtonBox.__init__(self)

        # Appearance: buttons packed at the end with a little spacing.
        self.set_border_width(5)
        self.set_layout(gtk.BUTTONBOX_END)
        self.set_spacing(5)
        box.pack_start(self, False, False)

        # "Add" appends a fresh row to the wind table.
        add_btn = gtk.Button(stock='gtk-add')
        add_btn.connect('clicked', lambda *w: wc.add_item())
        self.add(add_btn)

        # "Remove" deletes the currently selected row.
        rm_btn = gtk.Button(stock='gtk-remove')
        rm_btn.connect('clicked', lambda *w: wc.remove_item())
        self.add(rm_btn)
class WindCells(gtk.ScrolledWindow):
    """Scrolled tree-view of wind speed / frequency pairs.

    The view is backed by the module-level ``winds`` list; cell edits are
    validated and written back to that list, keeping it sorted by speed.
    """

    def __init__(self, parent=None, border=5):
        # NOTE(review): `parent` is accepted but never used here.
        gtk.ScrolledWindow.__init__(self)
        self.set_border_width(border)
        self.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        self.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        # create data
        self.model = self.__create_model()
        # create treeview
        self.treeview = gtk.TreeView(self.model)
        self.treeview.set_rules_hint(True)
        self.treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
        self.add(self.treeview)
        self.__add_columns(self.treeview)
        self.show_all()

    def __create_model(self):
        """Build the ListStore (speed str, wind str, editable bool) from ``winds``."""
        # create list store
        model = gtk.ListStore(gobject.TYPE_STRING,
                              gobject.TYPE_STRING,
                              gobject.TYPE_BOOLEAN)
        self.__add_itens(model)
        return model

    def __add_itens(self, model):
        """Append every row of the module-level ``winds`` list to *model*."""
        # add itens
        for item in winds:
            iter = model.append()
            model.set(iter,
                      COLUMN_SPEED, item[COLUMN_SPEED],
                      COLUMN_WIND, item[COLUMN_WIND],
                      COLUMN_EDITABLE, item[COLUMN_EDITABLE])

    def __add_columns(self, treeview):
        """Create the two editable columns: "u [m/s]" and "f(u) [%]"."""
        model = treeview.get_model()
        # speed column - edits routed to on_cell_edited with the model
        renderer = gtk.CellRendererText()
        renderer.set_data("column", COLUMN_SPEED)
        renderer.connect('edited', self.on_cell_edited, model)
        column = gtk.TreeViewColumn("u [m/s]", renderer,
                                    text=COLUMN_SPEED,
                                    editable=COLUMN_EDITABLE)
        column.set_sort_column_id(COLUMN_SPEED)
        treeview.append_column(column)
        # wind column - same renderer setup, different column index
        renderer = gtk.CellRendererText()
        renderer.set_data("column", COLUMN_WIND)
        renderer.connect('edited', self.on_cell_edited, model)
        column = gtk.TreeViewColumn("f(u) [%]", renderer,
                                    text=COLUMN_WIND,
                                    editable=COLUMN_EDITABLE)
        column.set_sort_column_id(COLUMN_WIND)
        treeview.append_column(column)

    def on_cell_edited(self, cell, path_string, new_text, model):
        """Validate an edited cell and write it back to ``winds`` and *model*.

        Speed edits must evaluate to a number (clamped at 0) and re-sort the
        table; wind edits may also be an expression in the speed variable ``u``.
        """
        column = cell.get_data("column")
        # NOTE(review): eval() of user-entered text - unsafe for untrusted
        # input, acceptable only because this is a local desktop tool.
        try:
            value = float(eval(new_text))
            if(value < 0):
                # negative values are clamped to zero
                value = 0.0
        except (SyntaxError, TypeError, ValueError), error:
            wpc.error_dialog(error)
        except NameError:
            # The wind column may hold an expression in terms of the speed
            # variable `u`; retry evaluation with a dummy u bound so such
            # formulas validate, and store the raw expression text.
            if column == COLUMN_WIND:
                u = 1
                try:
                    float(eval(new_text))
                    value = new_text
                except (SyntaxError, TypeError, ValueError, NameError), error:
                    wpc.error_dialog(error)
        # 'value' only exists if validation succeeded above
        if 'value' in locals():
            iter = model.get_iter_from_string(path_string)
            path = model.get_path(iter)[0]
            if column == COLUMN_SPEED:
                # update speed, keep the table sorted by speed, and rebuild
                # the model so view order matches the data order
                winds[path][COLUMN_SPEED] = str(value)
                winds.sort(key = lambda x: float(x[0]))
                model.clear()
                self.__add_itens(model)
            elif column == COLUMN_WIND:
                winds[path][COLUMN_WIND] = str(value)
                model.set(iter, column, winds[path][COLUMN_WIND])

    def get_winds(self):
        """Return the backing list of [speed, wind, editable] rows."""
        return winds

    def get_model(self):
        """Return the underlying gtk.ListStore."""
        return self.model

    def add_item(self):
        """Insert a fresh "0.0"/"0.0" row at the top of the table."""
        new_item = ["0.0", "0.0", True]
        winds.insert(0, new_item)
        iter = self.model.insert_before(self.model.get_iter_root())
        self.model.set(iter,
                       COLUMN_SPEED, new_item[COLUMN_SPEED],
                       COLUMN_WIND, new_item[COLUMN_WIND],
                       COLUMN_EDITABLE, new_item[COLUMN_EDITABLE])

    def remove_item(self):
        """Delete the currently selected row from both model and ``winds``."""
        selection = self.treeview.get_selection()
        model, iter = selection.get_selected()
        if iter:
            path = model.get_path(iter)[0]
            model.remove(iter)
            del winds[path]
|
mcr/ietfdb | refs/heads/master | ietf/doc/migrations/0008_auto__add_deletedevent.py | 1 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the table for the new DeletedEvent model."""
        # Adding model 'DeletedEvent'
        db.create_table('doc_deletedevent', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('json', self.gf('django.db.models.fields.TextField')()),
            ('by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['person.Person'])),
            ('time', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        # Tell South to emit the post-create signal for the new model.
        db.send_create_signal('doc', ['DeletedEvent'])
    def backwards(self, orm):
        """Reverse the migration: drop the DeletedEvent table."""
        # Deleting model 'DeletedEvent'
        db.delete_table('doc_deletedevent')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.ballotdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotDocEvent', '_ormbases': ['doc.DocEvent']},
'ballot_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.BallotType']"}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'})
},
'doc.ballotpositiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'BallotPositionDocEvent', '_ormbases': ['doc.DocEvent']},
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'ballot': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['doc.BallotDocEvent']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'comment_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'discuss': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'discuss_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'pos': ('django.db.models.fields.related.ForeignKey', [], {'default': "'norecord'", 'to': "orm['name.BallotPositionName']"})
},
'doc.ballottype': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotType'},
'doc_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'positions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.BallotPositionName']", 'symmetrical': 'False', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.consensusdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'ConsensusDocEvent', '_ormbases': ['doc.DocEvent']},
'consensus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'})
},
'doc.deletedevent': {
'Meta': {'object_name': 'DeletedEvent'},
'by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('django.db.models.fields.TextField', [], {}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'doc.docalias': {
'Meta': {'object_name': 'DocAlias'},
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'doc.docevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'DocEvent'},
'by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']"}),
'desc': ('django.db.models.fields.TextField', [], {}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'doc.dochistory': {
'Meta': {'object_name': 'DocHistory'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocHistoryAuthor']", 'blank': 'True'}),
'doc': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'history_set'", 'to': "orm['doc.Document']"}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.DocAlias']", 'symmetrical': 'False', 'through': "orm['doc.RelatedDocHistory']", 'blank': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_dochistory_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.dochistoryauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocHistoryAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {})
},
'doc.docreminder': {
'Meta': {'object_name': 'DocReminder'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'due': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocEvent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocReminderTypeName']"})
},
'doc.document': {
'Meta': {'object_name': 'Document'},
'abstract': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'symmetrical': 'False', 'through': "orm['doc.DocumentAuthor']", 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'reversely_related_document_set'", 'blank': 'True', 'through': "orm['doc.RelatedDocument']", 'to': "orm['doc.DocAlias']"}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['name.DocTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'})
},
'doc.documentauthor': {
'Meta': {'ordering': "['document', 'order']", 'object_name': 'DocumentAuthor'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Email']"}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'doc.initialreviewdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'InitialReviewDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.lastcalldocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'LastCallDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'doc.newrevisiondocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'NewRevisionDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'doc.relateddochistory': {
'Meta': {'object_name': 'RelatedDocHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocHistory']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reversely_related_document_history_set'", 'to': "orm['doc.DocAlias']"})
},
'doc.relateddocument': {
'Meta': {'object_name': 'RelatedDocument'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocRelationshipName']"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.DocAlias']"})
},
'doc.state': {
'Meta': {'ordering': "['type', 'order']", 'object_name': 'State'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'next_states': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'previous_states'", 'blank': 'True', 'to': "orm['doc.State']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'doc.statedocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'StateDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.State']", 'null': 'True', 'blank': 'True'}),
'state_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.StateType']"})
},
'doc.statetype': {
'Meta': {'object_name': 'StateType'},
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'})
},
'doc.telechatdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'TelechatDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'returning_item': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'telechat_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'doc.writeupdocevent': {
'Meta': {'ordering': "['-time', '-id']", 'object_name': 'WriteupDocEvent', '_ormbases': ['doc.DocEvent']},
'docevent_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['doc.DocEvent']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'group.group': {
'Meta': {'object_name': 'Group'},
'acronym': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'}),
'unused_states': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.State']", 'symmetrical': 'False', 'blank': 'True'}),
'unused_tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocTagName']", 'symmetrical': 'False', 'blank': 'True'})
},
'name.ballotpositionname': {
'Meta': {'ordering': "['order']", 'object_name': 'BallotPositionName'},
'blocking': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docrelationshipname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocRelationshipName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.docremindertypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocReminderTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctagname': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTagName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.doctypename': {
'Meta': {'ordering': "['order']", 'object_name': 'DocTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.groupstatename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupStateName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.grouptypename': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupTypeName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.intendedstdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'IntendedStdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.stdlevelname': {
'Meta': {'ordering': "['order']", 'object_name': 'StdLevelName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'name.streamname': {
'Meta': {'ordering': "['order']", 'object_name': 'StreamName'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'person.email': {
'Meta': {'object_name': 'Email'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'Meta': {'object_name': 'Person'},
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['doc']
|
ftCommunity/ft-TXT | refs/heads/master | board/knobloch/TXT/board-support/ti-linux/tools/perf/scripts/python/netdev-times.py | 11271 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # buffer of all tracepoint events handled by this script
irq_dic = {}; # key is cpu; value is a list which stacks irq records
              # for irqs which raise a NET_RX softirq
net_rx_dic = {}; # key is cpu; value holds the NET_RX softirq-entry time
                 # and a list which stacks receive events
receive_hunk_list = []; # a list of merged receive-event sequences ("hunks")
rx_skb_list = []; # received packet list, kept for matching against
                  # skb_copy_datagram_iovec / kfree_skb
buffer_budget = 65536; # max length of rx_skb_list, tx_queue_list and
                       # tx_xmit_list before oldest entries are discarded
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which have been freed (tx completed)
# options (set by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of the common prefix of every event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the interval from *src* to *dst*, converted from nsec to msec."""
    delta_ns = dst - src
    return delta_ns / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format strings used when displaying rx packet processing; the leading
# "|" characters draw the tree connecting an irq to its softirq events.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
    """Parse the script's command-line options: tx, rx, dev=<name>, debug."""
    global show_tx
    global show_rx
    global dev
    global debug
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # neither direction requested explicitly: show both
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer softirq-entry events for the NET_RX vector only."""
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer softirq-exit events for the NET_RX vector only."""
    # NOTE(review): vec is decoded through the *entry* event's symbol table;
    # the tables appear to be shared across the softirq tracepoints -- confirm.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    """perf callback: buffer softirq-raise events for the NET_RX vector only."""
    # NOTE(review): vec decoded via the entry event's symbol table -- see
    # irq__softirq_exit.
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    """perf callback: buffer hardirq-entry events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, irq_name))
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    """perf callback: buffer hardirq-exit events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, ret))
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    """perf callback: buffer napi_poll events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           napi, dev_name))
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    """perf callback: buffer netif_receive_skb events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    """perf callback: buffer netif_rx events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    """perf callback: buffer net_dev_queue (Qdisc enqueue) events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    """perf callback: buffer net_dev_xmit events (rc is the driver's return)."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, rc, dev_name))
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    """perf callback: buffer kfree_skb (packet drop/free) events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, protocol, location))
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    """perf callback: buffer consume_skb (normal tx completion) events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr))
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
                                 skbaddr, skblen):
    """perf callback: buffer skb_copy_datagram_iovec (user delivery) events."""
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Start tracking a hardirq: push a fresh record on this cpu's irq stack."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic:
        irq_dic[cpu] = []
    irq_dic[cpu].append({'irq': irq, 'name': irq_name,
                         'cpu': cpu, 'irq_ent_t': time})
def handle_irq_handler_exit(event_info):
    """Close the current hardirq record; keep it only if it raised NET_RX."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic:
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record['irq_ext_t'] = time
    # an irq which never queued a NET_RX softirq has no 'event_list'; drop it
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
    """Attach a NET_RX 'sirq_raise' marker to the irq on top of the cpu stack."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    events = irq_record.get('event_list', [])
    events.append({'time': time, 'event': 'sirq_raise'})
    irq_record['event_list'] = events
    irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
    """Open a fresh NET_RX softirq context for this cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}
def handle_irq_softirq_exit(event_info):
    """Merge the finished NET_RX softirq with its triggering irqs into one
    receive hunk and archive it for printing."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = irq_dic.pop(cpu, [])
    softirq = net_rx_dic.pop(cpu, None)
    # nothing to merge unless we saw both the irqs and the softirq entry
    if irq_list == [] or softirq is None:
        return
    receive_hunk_list.append({'sirq_ent_t': softirq['sirq_ent_t'],
                              'sirq_ext_t': time,
                              'irq_list': irq_list,
                              'event_list': softirq['event_list']})
def handle_napi_poll(event_info):
    """Log a napi_poll completion inside the cpu's open NET_RX context."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic:
        net_rx_dic[cpu]['event_list'].append(
            {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})
def handle_netif_rx(event_info):
    """Attach a netif_rx event to the hardirq on top of this cpu's stack."""
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    events = irq_record.get('event_list', [])
    events.append({'time': time, 'event': 'netif_rx',
                   'skbaddr': skbaddr, 'skblen': skblen,
                   'dev_name': dev_name})
    irq_record['event_list'] = events
    irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
    """Record a received skb in the NET_RX context and queue it for later
    matching against datagram-copy/free events."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic:
        rec_data = {'event_name': 'netif_receive_skb',
                    'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
        net_rx_dic[cpu]['event_list'].append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # bounded buffer: discard the oldest entry once over budget
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc (dev_queue_xmit)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, dev_name) = event_info
    tx_queue_list.insert(0, {'dev': dev_name, 'skbaddr': skbaddr,
                             'len': skblen, 'queue_t': time})
    # bounded buffer: discard the oldest entry once over budget
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
    """On a successful device transmit, move the matching packet from the
    Qdisc list to the xmit list and stamp its transmit time."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
        skbaddr, skblen, rc, dev_name) = event_info
    if rc != 0:  # only NETDEV_TX_OK moves the packet forward
        return
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            skb['xmit_t'] = time
            tx_xmit_list.insert(0, skb)
            del tx_queue_list[i]
            # bounded buffer: discard the oldest entry once over budget
            if len(tx_xmit_list) > buffer_budget:
                tx_xmit_list.pop()
                of_count_tx_xmit_list += 1
            return
def handle_kfree_skb(event_info):
    """Resolve a kfree_skb: a packet dropped from the Qdisc, a completed
    transmit, or a received skb being freed -- whichever list matches first."""
    (name, context, cpu, time, pid, comm,
        skbaddr, protocol, location) = event_info
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]  # freed before reaching the device
            return
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
def handle_consume_skb(event_info):
    """A consumed skb completes a transmit: stamp its free time and archive it."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
def handle_skb_copy_datagram_iovec(event_info):
    """Mark a received skb as delivered to user space and retire it."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
|
SlimRoms/android_external_chromium_org | refs/heads/lp5.0 | tools/perf/benchmarks/start_with_url.py | 8 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import startup
import page_sets
@test.Disabled('snowleopard') # crbug.com/336913
class StartWithUrlCold(test.Test):
  """Measure time to start Chrome cold with startup URLs"""
  tag = 'cold'  # labels the result series, distinguishing it from 'warm'
  test = startup.StartWithUrl
  page_set = page_sets.StartupPagesPageSet
  options = {'cold': True,
             'pageset_repeat': 5}
class StartWithUrlWarm(test.Test):
  """Measure time to start Chrome warm with startup URLs"""
  tag = 'warm'  # labels the result series, distinguishing it from 'cold'
  test = startup.StartWithUrl
  page_set = page_sets.StartupPagesPageSet
  options = {'warm': True,
             'pageset_repeat': 10}
|
Plain-Andy-legacy/android_external_chromium_org | refs/heads/lp-5.1r1 | tools/perf/page_sets/tough_filters_cases.py | 27 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughFiltersCasesPage(page_module.Page):
  """A page whose filter animation is self-driven; measurement only waits."""
  def RunSmoothness(self, action_runner):
    # the animation runs by itself -- sample smoothness for 10 seconds
    action_runner.Wait(10)
class PirateMarkPage(page_module.Page):
  """The IE PirateMark demo; the benchmark is started by clicking its button."""
  def RunSmoothness(self, action_runner):
    action_runner.EvaluateJavaScript(
        'document.getElementById("benchmarkButtonText").click()')
    action_runner.Wait(10)
class ToughFiltersCasesPageSet(page_set_module.PageSet):
  """
  Description: Self-driven filters animation examples
  """
  def __init__(self):
    """Register the SVG filter pages plus the PirateMark benchmark page."""
    super(ToughFiltersCasesPageSet, self).__init__(
      archive_data_file='data/tough_filters_cases.json',
      bucket=page_set_module.PARTNER_BUCKET)
    urls_list = [
      'http://letmespellitoutforyou.com/samples/svg/filter_terrain.svg',
      'http://static.bobdo.net/Analog_Clock.svg',
    ]
    for url in urls_list:
      self.AddPage(ToughFiltersCasesPage(url, self))
    # PirateMark needs an explicit click to start, so it has its own class
    self.AddPage(PirateMarkPage(
      'http://ie.microsoft.com/testdrive/Performance/Pirates/', self))
|
HIIT/hybra-core | refs/heads/master | hybra/loaders/mediacloud.py | 1 | from . import common
def load( file = '' ):
    """Load a Mediacloud JSON dump and yield normalized entry dicts.

    Each yielded dict has the keys ``text_content``, ``timestamp``,
    ``source`` and ``source_detail``. Entries missing a required field
    or carrying an unparseable date are skipped silently.

    :param file: path to the JSON file to read.
    """
    import json  # local import: the module does not import json at top level

    ## TODO: implement here hybra-core like caching and API management
    with open( file ) as fp:  # 'with' so the handle is always closed
        entries = json.load( fp )

    for e in entries:
        try:
            d = {}
            d['text_content'] = e['full_text_bow']
            d['timestamp'] = common._text_to_datetime( e['publish_date'] )
            d['source'] = e['media_name']
            d['source_detail'] = e['publish_date']
            yield d  # bug fix: was `yield data`, an undefined name
        except (KeyError, ValueError):
            # skip malformed entries only; other errors now propagate
            # (the original bare `except` hid every failure)
            pass
|
ygenc/onlineLDA | refs/heads/master | onlineldavb_new/build/scipy/build/lib.macosx-10.6-intel-2.7/scipy/signal/windows.py | 5 | """The suite of window functions."""
import numpy as np
from scipy import special, linalg
from scipy.fftpack import fft
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
'hamming', 'kaiser', 'gaussian', 'general_gaussian', 'chebwin',
'slepian', 'hann', 'get_window']
def boxcar(M, sym=True):
    """Return an M-point rectangular (boxcar) window: all samples are one.

    *sym* is accepted for API symmetry with the other windows but has no
    effect, since a constant window is identical in both variants.
    """
    return np.full(M, 1.0)
def triang(M, sym=True):
    """Return an M-point triangular window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    half = np.arange(1, int((M + 1) / 2) + 1)
    if M % 2 == 0:
        ramp = (2 * half - 1.0) / M
        w = np.r_[ramp, ramp[::-1]]
    else:
        ramp = 2 * half / (M + 1.0)
        w = np.r_[ramp, ramp[-2::-1]]
    return w[:-1] if trunc else w
def parzen(M, sym=True):
    """Return an M-point Parzen (de la Vallee Poussin) window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    pos = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
    outer = np.extract(pos < -(M - 1) / 4.0, pos)
    inner = np.extract(abs(pos) <= (M - 1) / 4.0, pos)
    w_outer = 2 * (1 - np.abs(outer) / (M / 2.0)) ** 3.0
    w_inner = (1 - 6 * (np.abs(inner) / (M / 2.0)) ** 2.0 +
               6 * (np.abs(inner) / (M / 2.0)) ** 3.0)
    w = np.r_[w_outer, w_inner, w_outer[::-1]]
    return w[:-1] if trunc else w
def bohman(M, sym=True):
    """Return an M-point Bohman window (zero at both endpoints)."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    fac = np.abs(np.linspace(-1, 1, M)[1:-1])
    core = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
    w = np.r_[0, core, 0]
    return w[:-1] if trunc else w
def blackman(M, sym=True):
    """Return an M-point Blackman window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    w = 0.42 - 0.5 * np.cos(theta) + 0.08 * np.cos(2 * theta)
    return w[:-1] if trunc else w
def nuttall(M, sym=True):
    """Return an M-point minimum 4-term Blackman-Harris window per Nuttall."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    a0, a1, a2, a3 = 0.3635819, 0.4891775, 0.1365995, 0.0106411
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = a0 - a1 * np.cos(fac) + a2 * np.cos(2 * fac) - a3 * np.cos(3 * fac)
    return w[:-1] if trunc else w
def blackmanharris(M, sym=True):
    """Return an M-point minimum 4-term Blackman-Harris window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    a0, a1, a2, a3 = 0.35875, 0.48829, 0.14128, 0.01168
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = a0 - a1 * np.cos(fac) + a2 * np.cos(2 * fac) - a3 * np.cos(3 * fac)
    return w[:-1] if trunc else w
def flattop(M, sym=True):
    """Return an M-point flat-top window (5-term cosine sum)."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    a0, a1, a2, a3, a4 = 0.2156, 0.4160, 0.2781, 0.0836, 0.0069
    fac = np.arange(0, M) * 2 * np.pi / (M - 1.0)
    w = (a0 - a1 * np.cos(fac) + a2 * np.cos(2 * fac) -
         a3 * np.cos(3 * fac) + a4 * np.cos(4 * fac))
    return w[:-1] if trunc else w
def bartlett(M, sym=True):
    """Return an M-point Bartlett window (triangular, zero endpoints)."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    k = np.arange(0, M)
    rising = 2.0 * k / (M - 1)
    w = np.where(k <= (M - 1) / 2.0, rising, 2.0 - rising)
    return w[:-1] if trunc else w
def hanning(M, sym=True):
    """Return an M-point Hann (Hanning) window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    w = 0.5 - 0.5 * np.cos(theta)
    return w[:-1] if trunc else w
# the Hann window is also exported under its proper name
hann = hanning
def barthann(M, sym=True):
    """Return an M-point modified Bartlett-Hann window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    fac = np.abs(np.arange(0, M) / (M - 1.0) - 0.5)
    w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
    return w[:-1] if trunc else w
def hamming(M, sym=True):
    """Return an M-point Hamming window."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    theta = 2.0 * np.pi * np.arange(0, M) / (M - 1)
    w = 0.54 - 0.46 * np.cos(theta)
    return w[:-1] if trunc else w
def kaiser(M, beta, sym=True):
    """Return an M-point Kaiser window with shape parameter *beta*."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    alpha = (M - 1) / 2.0
    ratio = (np.arange(0, M) - alpha) / alpha
    # I0 is the zeroth-order modified Bessel function of the first kind
    w = special.i0(beta * np.sqrt(1 - ratio ** 2.0)) / special.i0(beta)
    return w[:-1] if trunc else w
def gaussian(M, std, sym=True):
    """Return an M-point Gaussian window with standard deviation *std*."""
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    sig2 = 2 * std * std
    w = np.exp(-offsets ** 2 / sig2)
    return w[:-1] if trunc else w
def general_gaussian(M, p, sig, sym=True):
    """Return an M-point generalized-Gaussian window.

    The shape is ``exp(-0.5*(x/sig)**(2*p))``; the half-power point sits at
    ``(2*log(2))**(1/(2*p)) * sig``.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    offsets = np.arange(0, M) - (M - 1.0) / 2.0
    w = np.exp(-0.5 * (offsets / sig) ** (2 * p))
    return w[:-1] if trunc else w
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
    """Return an M-point Dolph-Chebyshev window.

    Parameters
    ----------
    M : int
        Window size.
    at : float
        Attenuation of the sidelobes (in dB).
    sym : bool
        Generates symmetric window if True.
    """
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    odd = M % 2
    if not sym and not odd:
        M = M + 1
    # compute the parameter beta from the requested attenuation
    order = M - 1.0
    beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
    k = np.r_[0:M] * 1.0
    x = beta * np.cos(np.pi * k / M)
    # Find the window's DFT coefficients
    # Use analytic definition of Chebyshev polynomial instead of expansion
    # from scipy.special. Using the expansion in scipy.special leads to errors.
    p = np.zeros(x.shape)
    p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
    p[x < -1] = (1 - 2 * (order % 2)) * np.cosh(order * np.arccosh(-x[x < -1]))
    p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
    # Appropriate IDFT and filling up, depending on even/odd M.
    # BUG FIX: the index arithmetic below used '/', which is float division
    # under Python 3 and made the slicing raise TypeError; '//' is identical
    # for these non-negative ints under Python 2 and correct under Python 3.
    if M % 2:
        w = np.real(fft(p))
        n = (M + 1) // 2
        w = w[:n] / w[0]
        w = np.concatenate((w[n - 1:0:-1], w))
    else:
        p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
        w = np.real(fft(p))
        n = M // 2 + 1
        w = w / w[1]
        w = np.concatenate((w[n - 1:0:-1], w[1:n]))
    if not sym and not odd:
        w = w[:-1]
    return w
def slepian(M, width, sym=True):
    """Return an M-point Slepian (digital prolate spheroidal) window,
    normalized so its maximum value is one."""
    if (M * width > 27.38):
        raise ValueError("Cannot reliably obtain slepian sequences for"
                         " M*width > 27.38.")
    if M < 1:
        return np.array([])
    if M == 1:
        return np.ones(1, 'd')
    # periodic window: compute M+1 symmetric points, then drop the last
    trunc = not sym and M % 2 == 0
    if trunc:
        M = M + 1
    half_bw = width / 2.0
    offsets = np.arange(0, M) - (M - 1) / 2.0
    diffs = offsets[:, np.newaxis] - offsets[np.newaxis, :]
    AF = half_bw * special.sinc(half_bw * diffs)
    # the window is the eigenvector of AF with the largest eigenvalue
    [lam, vec] = linalg.eig(AF)
    dominant = np.argmax(abs(lam), axis=-1)
    w = np.abs(vec[:, dominant])
    w = w / max(w)
    return w[:-1] if trunc else w
def get_window(window, Nx, fftbins=True):
    """
    Return a window of length `Nx` and type `window`.

    Parameters
    ----------
    window : string, float, or tuple
        The type of window to create (see below).
    Nx : int
        The number of samples in the window.
    fftbins : bool, optional
        If True, create a "periodic" window ready to use with ifftshift
        and be multiplied by the result of an fft (SEE ALSO fftfreq).

    Notes
    -----
    Recognized window types:

        boxcar, triang, blackman, hamming, hanning, bartlett,
        parzen, bohman, blackmanharris, nuttall, barthann,
        kaiser (needs beta), gaussian (needs std),
        general_gaussian (needs power, width),
        slepian (needs width), chebwin (needs attenuation)

    A parameterless window may be given as a plain string. A window that
    needs parameters must be given as a tuple whose first element is the
    window name and whose remaining elements are the parameters. A plain
    float is interpreted as the beta parameter of a Kaiser window.

    Examples
    --------
    >>> get_window('triang', 7)
    array([ 0.25,  0.5 ,  0.75,  1.  ,  0.75,  0.5 ,  0.25])
    >>> get_window(4.0, 9)
    array([ 0.08848053,  0.32578323,  0.63343178,  0.89640418,  1.        ,
            0.89640418,  0.63343178,  0.32578323,  0.08848053])
    """
    sym = not fftbins
    try:
        beta = float(window)
    except (TypeError, ValueError):
        args = ()
        if isinstance(window, tuple):
            winstr = window[0]
            if len(window) > 1:
                args = window[1:]
        elif isinstance(window, str):
            if window in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss',
                          'general gaussian', 'general_gaussian',
                          'general gauss', 'general_gauss', 'ggs',
                          'slepian', 'optimal', 'slep', 'dss',
                          'chebwin', 'cheb']:
                raise ValueError("The '" + window + "' window needs one or "
                                 "more parameters -- pass a tuple.")
            else:
                winstr = window
        # every accepted alias mapped to its window function
        alias_table = {
            ('blackman', 'black', 'blk'): blackman,
            ('triangle', 'triang', 'tri'): triang,
            ('hamming', 'hamm', 'ham'): hamming,
            ('bartlett', 'bart', 'brt'): bartlett,
            ('hanning', 'hann', 'han'): hanning,
            ('blackmanharris', 'blackharr', 'bkh'): blackmanharris,
            ('parzen', 'parz', 'par'): parzen,
            ('bohman', 'bman', 'bmn'): bohman,
            ('nuttall', 'nutl', 'nut'): nuttall,
            ('barthann', 'brthan', 'bth'): barthann,
            ('flattop', 'flat', 'flt'): flattop,
            ('kaiser', 'ksr'): kaiser,
            ('gaussian', 'gauss', 'gss'): gaussian,
            ('general gaussian', 'general_gaussian',
             'general gauss', 'general_gauss', 'ggs'): general_gaussian,
            ('boxcar', 'box', 'ones'): boxcar,
            ('slepian', 'slep', 'optimal', 'dss'): slepian,
            ('chebwin', 'cheb'): chebwin,
        }
        winfunc = None
        for aliases, func in alias_table.items():
            if winstr in aliases:
                winfunc = func
                break
        if winfunc is None:
            raise ValueError("Unknown window type.")
        params = (Nx,) + args + (sym,)
    else:
        # a bare number selects a Kaiser window with that beta
        winfunc = kaiser
        params = (Nx, beta, sym)
    return winfunc(*params)
|
palfrey/coherence | refs/heads/master | coherence/upnp/devices/binary_light_client.py | 1 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2008, Frank Scholz <coherence@beebits.net>
from coherence.upnp.services.clients.switch_power_client import SwitchPowerClient
from coherence import log
import coherence.extern.louie as louie
class BinaryLightClient(log.Loggable):
    """Client-side proxy for a UPnP BinaryLight device.

    Wraps a discovered device and exposes its SwitchPower:1 service
    through a :class:`SwitchPowerClient`.  Once that service has sent
    its first notification, the louie signal
    'Coherence.UPnP.DeviceClient.detection_completed' is emitted.
    """
    logCategory = 'binarylight_client'
    def __init__(self, device):
        self.device = device
        # device type looks like 'urn:schemas-upnp-org:device:BinaryLight:1'
        # -> the colon-separated elements 3 and 4 are type name and version
        self.device_type,self.version = device.get_device_type().split(':')[3:5]
        self.icons = device.icons
        self.switch_power = None
        self.detection_completed = False
        louie.connect(self.service_notified, signal='Coherence.UPnP.DeviceClient.Service.notified', sender=self.device)
        for service in self.device.get_services():
            if service.get_type() in ["urn:schemas-upnp-org:service:SwitchPower:1"]:
                self.switch_power = SwitchPowerClient(service)
        self.info("BinaryLight %s" % (self.device.get_friendly_name()))
        if self.switch_power:
            self.info("SwitchPower service available")
        else:
            self.warning("SwitchPower service not available, device not implemented properly according to the UPnP specification")
    def remove(self):
        """Tear down this client and its SwitchPower service proxy."""
        self.info("removal of BinaryLightClient started")
        if self.switch_power != None:
            self.switch_power.remove()
    def service_notified(self, service):
        """Handle a service notification and fire detection_completed once.

        The completion signal is only sent after the SwitchPower service
        has a non-None 'last_time_updated', i.e. it received real state.
        """
        self.info("Service %r sent notification" % service);
        if self.detection_completed == True:
            return
        if self.switch_power != None:
            if not hasattr(self.switch_power.service, 'last_time_updated'):
                return
            if self.switch_power.service.last_time_updated == None:
                return
        self.detection_completed = True
        louie.send('Coherence.UPnP.DeviceClient.detection_completed', None,
                   client=self,udn=self.device.udn)
    def state_variable_change( self, variable):
        # NOTE(review): self.info() is passed five positional arguments here;
        # confirm Loggable.info accepts more than one argument.
        self.info(variable.name, 'changed from', variable.old_value, 'to', variable.value)
|
EliotBerriot/django | refs/heads/master | tests/admin_utils/tests.py | 107 | from __future__ import unicode_literals
from datetime import datetime
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
NestedObjects, display_for_field, flatten, flatten_fieldsets,
label_for_field, lookup_field, quote,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import six
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from .models import (
Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)
class NestedObjectsTests(TestCase):
    """
    Tests for ``NestedObject`` utility collection.
    """
    def setUp(self):
        self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
        self.objs = [Count.objects.create(num=i) for i in range(5)]
    def _check(self, target):
        # Render the collected graph via each object's num and compare
        # against the expected nested-list structure.
        self.assertEqual(self.n.nested(lambda obj: obj.num), target)
    def _connect(self, i, j):
        # Make objs[i] a child of objs[j].
        self.objs[i].parent = self.objs[j]
        self.objs[i].save()
    def _collect(self, *indices):
        self.n.collect([self.objs[i] for i in indices])
    def test_unrelated_roots(self):
        self._connect(2, 1)
        self._collect(0)
        self._collect(1)
        self._check([0, 1, [2]])
    def test_siblings(self):
        self._connect(1, 0)
        self._connect(2, 0)
        self._collect(0)
        self._check([0, [1, 2]])
    def test_non_added_parent(self):
        # A parent that was never collected must not appear in the output.
        self._connect(0, 1)
        self._collect(0)
        self._check([0])
    def test_cyclic(self):
        # A cycle must not cause infinite recursion during nesting.
        self._connect(0, 2)
        self._connect(1, 0)
        self._connect(2, 1)
        self._collect(0)
        self._check([0, [1, [2]]])
    def test_queries(self):
        self._connect(1, 0)
        self._connect(2, 0)
        # 1 query to fetch all children of 0 (1 and 2)
        # 1 query to fetch all children of 1 and 2 (none)
        # Should not require additional queries to populate the nested graph.
        self.assertNumQueries(2, self._collect, 0)
    def test_on_delete_do_nothing(self):
        """
        Check that the nested collector doesn't query for DO_NOTHING objects.
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        objs = [Event.objects.create()]
        EventGuide.objects.create(event=objs[0])
        with self.assertNumQueries(2):
            # One for Location, one for Guest, and no query for EventGuide
            n.collect(objs)
    def test_relation_on_abstract(self):
        """
        #21846 -- Check that `NestedObjects.collect()` doesn't trip
        (AttributeError) on the special notation for relations on abstract
        models (related_name that contains %(app_label)s and/or %(class)s).
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        Car.objects.create()
        n.collect([Vehicle.objects.first()])
class UtilsTests(SimpleTestCase):
    """Tests for the helper functions in ``django.contrib.admin.utils``."""
    # Sentinel passed as the "empty value" argument of display_for_field.
    empty_value = '-empty-'
    def test_values_from_lookup_field(self):
        """
        Regression test for #12654: lookup_field
        """
        SITE_NAME = 'example.com'
        TITLE_TEXT = 'Some title'
        CREATED_DATE = datetime.min
        ADMIN_METHOD = 'admin method'
        SIMPLE_FUNCTION = 'function'
        INSTANCE_ATTRIBUTE = 'attr'
        class MockModelAdmin(object):
            def get_admin_value(self, obj):
                return ADMIN_METHOD
            simple_function = lambda obj: SIMPLE_FUNCTION
        site_obj = Site(domain=SITE_NAME)
        article = Article(
            site=site_obj,
            title=TITLE_TEXT,
            created=CREATED_DATE,
        )
        article.non_field = INSTANCE_ATTRIBUTE
        # NOTE(review): 'simple_function' is referenced below as a bare name;
        # confirm it is actually in scope at this indentation level.
        verifications = (
            ('site', SITE_NAME),
            ('created', localize(CREATED_DATE)),
            ('title', TITLE_TEXT),
            ('get_admin_value', ADMIN_METHOD),
            (simple_function, SIMPLE_FUNCTION),
            ('test_from_model', article.test_from_model()),
            ('non_field', INSTANCE_ATTRIBUTE)
        )
        mock_admin = MockModelAdmin()
        for name, value in verifications:
            field, attr, resolved_value = lookup_field(name, article, mock_admin)
            if field is not None:
                resolved_value = display_for_field(resolved_value, field, self.empty_value)
            self.assertEqual(value, resolved_value)
    def test_null_display_for_field(self):
        """
        Regression test for #12550: display_for_field should handle None
        value.
        """
        display_value = display_for_field(None, models.CharField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.CharField(
            choices=(
                (None, "test_none"),
            )
        ), self.empty_value)
        self.assertEqual(display_value, "test_none")
        display_value = display_for_field(None, models.DateField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.TimeField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        # Regression test for #13071: NullBooleanField has special
        # handling.
        display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None" />' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)
        display_value = display_for_field(None, models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
    def test_number_formats_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')
        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')
        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12345')
    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_number_formats_with_thousand_seperator_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')
        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')
        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12,345')
    def test_label_for_field(self):
        """
        Tests for label_for_field
        """
        self.assertEqual(
            label_for_field("title", Article),
            "title"
        )
        self.assertEqual(
            label_for_field("title2", Article),
            "another name"
        )
        self.assertEqual(
            label_for_field("title2", Article, return_attr=True),
            ("another name", None)
        )
        self.assertEqual(
            label_for_field("__unicode__", Article),
            "article"
        )
        self.assertEqual(
            label_for_field("__str__", Article),
            str("article")
        )
        self.assertRaises(
            AttributeError,
            lambda: label_for_field("unknown", Article)
        )
        def test_callable(obj):
            return "nothing"
        self.assertEqual(
            label_for_field(test_callable, Article),
            "Test callable"
        )
        self.assertEqual(
            label_for_field(test_callable, Article, return_attr=True),
            ("Test callable", test_callable)
        )
        self.assertEqual(
            label_for_field("test_from_model", Article),
            "Test from model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, return_attr=True),
            ("Test from model", Article.test_from_model)
        )
        self.assertEqual(
            label_for_field("test_from_model_with_override", Article),
            "not What you Expect"
        )
        # An anonymous callable without short_description gets the
        # placeholder label.
        self.assertEqual(
            label_for_field(lambda x: "nothing", Article),
            "--"
        )
        class MockModelAdmin(object):
            def test_from_model(self, obj):
                return "nothing"
            test_from_model.short_description = "not Really the Model"
        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
            "not Really the Model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article,
                model_admin=MockModelAdmin,
                return_attr=True),
            ("not Really the Model", MockModelAdmin.test_from_model)
        )
    def test_label_for_property(self):
        # NOTE: cannot use @property decorator, because of
        # AttributeError: 'property' object has no attribute 'short_description'
        class MockModelAdmin(object):
            def my_property(self):
                return "this if from property"
            my_property.short_description = 'property short description'
            test_from_property = property(my_property)
        self.assertEqual(
            label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
            'property short description'
        )
    def test_related_name(self):
        """
        Regression test for #13963
        """
        self.assertEqual(
            label_for_field('location', Event, return_attr=True),
            ('location', None),
        )
        self.assertEqual(
            label_for_field('event', Location, return_attr=True),
            ('awesome event', None),
        )
        self.assertEqual(
            label_for_field('guest', Event, return_attr=True),
            ('awesome guest', None),
        )
    def test_logentry_unicode(self):
        """
        Regression test for #15661
        """
        log_entry = admin.models.LogEntry()
        log_entry.action_flag = admin.models.ADDITION
        self.assertTrue(
            six.text_type(log_entry).startswith('Added ')
        )
        log_entry.action_flag = admin.models.CHANGE
        self.assertTrue(
            six.text_type(log_entry).startswith('Changed ')
        )
        log_entry.action_flag = admin.models.DELETION
        self.assertTrue(
            six.text_type(log_entry).startswith('Deleted ')
        )
        # Make sure custom action_flags works
        log_entry.action_flag = 4
        self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
    def test_safestring_in_field_label(self):
        # safestring should not be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label=mark_safe('<i>text</i>'))
            cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
        form = MyForm()
        self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
                             '<label for="id_text" class="required inline"><i>text</i>:</label>')
        self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
                             '<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
        # normal strings needs to be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label='&text')
            cb = forms.BooleanField(label='&cb')
        form = MyForm()
        self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
                             '<label for="id_text" class="required inline">&amp;text:</label>')
        self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
                             '<label for="id_cb" class="vCheckboxLabel required inline">&amp;cb</label>')
    def test_flatten(self):
        flat_all = ['url', 'title', 'content', 'sites']
        inputs = (
            ((), []),
            (('url', 'title', ('content', 'sites')), flat_all),
            (('url', 'title', 'content', 'sites'), flat_all),
            ((('url', 'title'), ('content', 'sites')), flat_all)
        )
        for orig, expected in inputs:
            self.assertEqual(flatten(orig), expected)
    def test_flatten_fieldsets(self):
        """
        Regression test for #18051
        """
        fieldsets = (
            (None, {
                'fields': ('url', 'title', ('content', 'sites'))
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
        fieldsets = (
            (None, {
                'fields': ('url', 'title', ['content', 'sites'])
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
    def test_quote(self):
        self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
|
quantopian/zipline | refs/heads/master | zipline/utils/math_utils.py | 1 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from decimal import Decimal
import math
from numpy import isnan
def tolerant_equals(a, b, atol=10e-7, rtol=10e-7, equal_nan=False):
    """Check if a and b are equal with some tolerance.

    This is a scalar fast path for :func:`numpy.isclose`; see that
    function's docstring for the exact meaning of ``atol`` and ``rtol``.

    Parameters
    ----------
    a, b : float
        The floats to check for equality.
    atol : float, optional
        The absolute tolerance.
    rtol : float, optional
        The relative tolerance.
    equal_nan : bool, optional
        Should NaN compare equal?

    See Also
    --------
    numpy.isclose
    """
    nans_compare_equal = equal_nan and isnan(a) and isnan(b)
    if nans_compare_equal:
        return True
    # same formula numpy.isclose uses: |a - b| <= atol + rtol * |b|
    allowed = atol + rtol * math.fabs(b)
    return math.fabs(a - b) <= allowed
# Bind the nan-aware aggregation functions from bottleneck (C
# implementations) when it is installed, otherwise fall back to the
# slower numpy equivalents.  Both modules expose the same names.
try:
    import bottleneck as _backend  # fast versions
except ImportError:
    import numpy as _backend  # slower numpy
nanmean = _backend.nanmean
nanstd = _backend.nanstd
nansum = _backend.nansum
nanmax = _backend.nanmax
nanmin = _backend.nanmin
nanargmax = _backend.nanargmax
nanargmin = _backend.nanargmin
nanmedian = _backend.nanmedian
def round_if_near_integer(a, epsilon=1e-4):
    """
    Round a to the nearest integer if that integer is within an epsilon
    of a.
    """
    nearest = round(a)
    return nearest if abs(a - nearest) <= epsilon else a
def number_of_decimal_places(n):
    """
    Compute the number of decimal places in a number.

    Examples
    --------
    >>> number_of_decimal_places(1)
    0
    >>> number_of_decimal_places(3.14)
    2
    >>> number_of_decimal_places('3.14')
    2
    """
    # Decimal preserves the literal exponent of its (string) input, so
    # the negated exponent is exactly the count of fractional digits.
    return -Decimal(str(n)).as_tuple().exponent
|
HiroIshikawa/21playground | refs/heads/master | thumblelog/myproject/lib/python3.5/site-packages/werkzeug/utils.py | 345 | # -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import pkgutil
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
from werkzeug._compat import unichr, text_type, string_types, iteritems, \
reraise, PY2
from werkzeug._internal import _DictAccessorProperty, \
_parse_signature, _missing
# Matches the $name and ${name} placeholders consumed by format_string().
_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
# Matches one entity reference (&...;); group 1 is the entity name.
_entity_re = re.compile(r'&([^;]+);')
# Characters matching this pattern are *removed* by secure_filename().
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
# Reserved device names on Windows that must not be used as filenames.
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
                         'LPT2', 'LPT3', 'PRN', 'NUL')
class cached_property(property):
    """A decorator that converts a function into a lazy property.  The
    wrapped function runs once, on first access; its result is stored in
    the instance ``__dict__`` and handed back directly afterwards::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # implementation detail: subclassing the builtin ``property`` keeps
    # descriptor semantics, but __get__ is overridden so that even a
    # manual invocation of __get__ goes through the cache.
    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __set__(self, obj, value):
        # explicit assignment simply seeds (or overwrites) the cache
        obj.__dict__[self.__name__] = value

    def __get__(self, obj, type=None):
        if obj is None:
            # class-level access yields the descriptor itself
            return self
        cached = obj.__dict__.get(self.__name__, _missing)
        if cached is not _missing:
            return cached
        result = self.func(obj)
        obj.__dict__[self.__name__] = result
        return result
class environ_property(_DictAccessorProperty):
    """Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:
    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'
    If you pass it a second value it's used as default if the key does not
    exist, the third one can be a converter that takes a value and converts
    it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
    is used. If no default value is provided `None` is used.
    Per default the property is read only. You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """
    read_only = True
    def lookup(self, obj):
        # _DictAccessorProperty calls this hook to find the backing dict.
        return obj.environ
class header_property(_DictAccessorProperty):
    """Like `environ_property` but for headers."""
    def lookup(self, obj):
        # values are read from (and written to) the object's headers mapping
        return obj.headers
class HTMLBuilder(object):
    """Helper object for HTML generation.
    Per default there are two instances of that class. The `html` one, and
    the `xhtml` one for those two dialects. The class uses keyword parameters
    and positional parameters to generate small snippets of HTML.
    Keyword parameters are converted to XML/SGML attributes, positional
    arguments are used as children. Because Python accepts positional
    arguments before keyword arguments it's a good idea to use a list with the
    star-syntax for some children:
    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
    ...        html.a('bar', href='bar.html')])
    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
    This class works around some browser limitations and can not be used for
    arbitrary SGML/XML generation. For that purpose lxml and similar
    libraries exist.
    Calling the builder escapes the string passed:
    >>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
    """
    _entity_re = re.compile(r'&([^;]+);')
    _entities = name2codepoint.copy()
    # 'apos' is not in name2codepoint; add it explicitly.
    _entities['apos'] = 39
    # void elements: rendered without a closing tag when they have no children
    _empty_elements = set([
        'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
        'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
        'source', 'wbr'
    ])
    # attributes whose mere presence is the value (e.g. <option selected>)
    _boolean_attributes = set([
        'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
        'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
    ])
    _plaintext_elements = set(['textarea'])
    _c_like_cdata = set(['script', 'style'])
    def __init__(self, dialect):
        # dialect is 'html' or 'xhtml'; it controls void-element and
        # boolean-attribute rendering below
        self._dialect = dialect
    def __call__(self, s):
        # calling the builder escapes the passed string
        return escape(s)
    def __getattr__(self, tag):
        if tag[:2] == '__':
            raise AttributeError(tag)
        def proxy(*children, **arguments):
            buffer = '<' + tag
            for key, value in iteritems(arguments):
                if value is None:
                    # None means "omit this attribute entirely"
                    continue
                if key[-1] == '_':
                    # trailing underscore lets callers pass reserved words
                    # (class_, for_) as attribute names
                    key = key[:-1]
                if key in self._boolean_attributes:
                    if not value:
                        continue
                    if self._dialect == 'xhtml':
                        # XHTML requires a value: selected="selected"
                        value = '="' + key + '"'
                    else:
                        value = ''
                else:
                    value = '="' + escape(value) + '"'
                buffer += ' ' + key + value
            if not children and tag in self._empty_elements:
                # void element: self-closing in XHTML, bare in HTML
                if self._dialect == 'xhtml':
                    buffer += ' />'
                else:
                    buffer += '>'
                return buffer
            buffer += '>'
            children_as_string = ''.join([text_type(x) for x in children
                                          if x is not None])
            if children_as_string:
                if tag in self._plaintext_elements:
                    children_as_string = escape(children_as_string)
                elif tag in self._c_like_cdata and self._dialect == 'xhtml':
                    # wrap script/style bodies so XML parsers don't choke
                    children_as_string = '/*<![CDATA[*/' + \
                        children_as_string + '/*]]>*/'
            buffer += children_as_string + '</' + tag + '>'
            return buffer
        return proxy
    def __repr__(self):
        return '<%s for %r>' % (
            self.__class__.__name__,
            self._dialect
        )
#: ready-to-use builder instances for the two supported dialects
html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')
def get_content_type(mimetype, charset):
    """Returns the full content type string with charset for a mimetype.
    If the mimetype represents text (``text/*``, ``application/xml`` or
    any ``application/*+xml`` type) the charset will be appended as
    charset parameter, otherwise the mimetype is returned unchanged.
    :param mimetype: the mimetype to be used as content type.
    :param charset: the charset to be appended in case it was a text mimetype.
    :return: the content type.
    """
    is_textual = (
        mimetype.startswith('text/')
        or mimetype == 'application/xml'
        or (mimetype.startswith('application/')
            and mimetype.endswith('+xml'))
    )
    if is_textual:
        return mimetype + '; charset=' + charset
    return mimetype
def format_string(string, context):
    """String-template format a string:
    >>> format_string('$foo and ${foo}s', dict(foo=42))
    '42 and 42s'
    This does not do any attribute lookup etc. For more advanced string
    formattings have a look at the `werkzeug.template` module.
    :param string: the format string.
    :param context: a dict with the variables to insert.
    """
    def lookup_arg(match):
        # group(1) matched the $name form, group(2) the ${name} form
        x = context[match.group(1) or match.group(2)]
        if not isinstance(x, string_types):
            # coerce non-strings to the same string type as the template
            x = type(string)(x)
        return x
    return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it.  This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.  The filename returned is an ASCII only string
    for maximum portability.
    On windows systems the function also makes sure that the file is not
    named after one of the special device files.
    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'
    The function might return an empty filename.  It's your responsibility
    to ensure that the filename is unique and that you generate random
    filename if the function returned an empty one.
    .. versionadded:: 0.5
    :param filename: the filename to secure
    """
    if isinstance(filename, text_type):
        from unicodedata import normalize
        # decompose accented characters and drop any non-ASCII remainder
        filename = normalize('NFKD', filename).encode('ascii', 'ignore')
        if not PY2:
            filename = filename.decode('ascii')
    for sep in os.path.sep, os.path.altsep:
        if sep:
            # neutralize path separators so the result cannot traverse
            # directories when joined onto a base path
            filename = filename.replace(sep, ' ')
    # collapse whitespace to underscores, then strip everything outside
    # the allowed ASCII set and trailing/leading dots and underscores
    filename = str(_filename_ascii_strip_re.sub('', '_'.join(
                   filename.split()))).strip('._')
    # on nt a couple of special files are present in each folder. We
    # have to ensure that the target file is not such a filename. In
    # this case we prepend an underline
    if os.name == 'nt' and filename and \
       filename.split('.')[0].upper() in _windows_device_files:
        filename = '_' + filename
    return filename
def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
    There is a special handling for `None` which escapes to an empty string.
    .. versionchanged:: 0.9
       `quote` is now implicitly on.
    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ''
    elif hasattr(s, '__html__'):
        # the object knows how to render itself as markup; trust it
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn
        warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2)
    # '&' must be replaced first, otherwise the entities inserted by the
    # later replacements would themselves get escaped
    s = s.replace('&', '&amp;').replace('<', '&lt;') \
        .replace('>', '&gt;').replace('"', '&quot;')
    return s
def unescape(s):
    """The reverse function of `escape`. This unescapes all the HTML
    entities, not only the XML entities inserted by `escape`.
    :param s: the string to unescape.
    """
    def handle_match(m):
        name = m.group(1)
        if name in HTMLBuilder._entities:
            # named entity (&amp;, &quot;, ...)
            return unichr(HTMLBuilder._entities[name])
        try:
            # numeric character references: &#xHH; / &#XHH; / &#NNN;
            if name[:2] in ('#x', '#X'):
                return unichr(int(name[2:], 16))
            elif name.startswith('#'):
                return unichr(int(name[1:]))
        except ValueError:
            pass
        # unknown entities are dropped entirely
        return u''
    return _entity_re.sub(handle_match, s)
def redirect(location, code=302, Response=None):
    """Returns a response object (a WSGI application) that, if called,
    redirects the client to the target location.  Supported codes are 301,
    302, 303, 305, and 307.  300 is not supported because it's not a real
    redirect and 304 because it's the answer for a request with a request
    with defined If-Modified-Since headers.
    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.
    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.
    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        from werkzeug.wrappers import Response
    # escape *before* the IRI->URI conversion below so the human-readable
    # link text in the body shows the original target
    display_location = escape(location)
    if isinstance(location, text_type):
        # Safe conversion is necessary here as we might redirect
        # to a broken URI scheme (for instance itms-services).
        from werkzeug.urls import iri_to_uri
        location = iri_to_uri(location, safe_conversion=True)
    response = Response(
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        '<title>Redirecting...</title>\n'
        '<h1>Redirecting...</h1>\n'
        '<p>You should be redirected automatically to target URL: '
        '<a href="%s">%s</a>.  If not click the link.' %
        (escape(location), display_location), code, mimetype='text/html')
    response.headers['Location'] = location
    return response
def append_slash_redirect(environ, code=301):
    """Redirects to the same URL but with a slash appended.  The behavior
    of this function is undefined if the path ends with a slash already.
    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    target = environ['PATH_INFO'].strip('/') + '/'
    query = environ.get('QUERY_STRING')
    if query:
        target = '%s?%s' % (target, query)
    return redirect(target, code)
def import_string(import_name, silent=False):
    """Imports an object based on a string.  This is useful if you want to
    use import paths as endpoints or something similar.  An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).
    If `silent` is True the return value will be `None` if the import fails.
    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    # force the import name to automatically convert to strings
    # __import__ is not able to handle unicode strings in the fromlist
    # if the module is a package
    import_name = str(import_name).replace(':', '.')
    try:
        try:
            __import__(import_name)
        except ImportError:
            # a failing import of a dotted name may still mean the last
            # segment is an attribute rather than a module -- fall through
            # to the rsplit handling below
            if '.' not in import_name:
                raise
        else:
            return sys.modules[import_name]
        module_name, obj_name = import_name.rsplit('.', 1)
        try:
            module = __import__(module_name, None, None, [obj_name])
        except ImportError:
            # support importing modules not yet set up by the parent module
            # (or package for that matter)
            module = import_string(module_name)
        try:
            return getattr(module, obj_name)
        except AttributeError as e:
            raise ImportError(e)
    except ImportError as e:
        if not silent:
            # wrap in a richer error but keep the original traceback
            reraise(
                ImportStringError,
                ImportStringError(import_name, e),
                sys.exc_info()[2])
def find_modules(import_path, include_packages=False, recursive=False):
    """Finds all the modules below a package.  This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.
    Packages are not returned unless `include_packages` is `True`.  This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.
    :param import_name: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    module = import_string(import_path)
    path = getattr(module, '__path__', None)
    if path is None:
        raise ValueError('%r is not a package' % import_path)
    basename = module.__name__ + '.'
    for importer, modname, ispkg in pkgutil.iter_modules(path):
        modname = basename + modname
        if ispkg:
            if include_packages:
                yield modname
            if recursive:
                # descend into the subpackage and re-yield its modules
                for item in find_modules(modname, include_packages, True):
                    yield item
        else:
            yield modname
def validate_arguments(func, args, kwargs, drop_extra=True):
    """Checks if the function accepts the arguments and keyword arguments.
    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
    the function without causing a `TypeError` because the function signature
    is incompatible.  If `drop_extra` is set to `True` (which is the default)
    any extra positional or keyword arguments are dropped automatically.
    The exception raised provides three attributes:
    `missing`
        A set of argument names that the function expected but where
        missing.
    `extra`
        A dict of keyword arguments that the function can not handle but
        where provided.
    `extra_positional`
        A list of values that where given by positional argument but the
        function cannot accept.
    This can be useful for decorators that forward user submitted data to
    a view function::
        from werkzeug.utils import ArgumentValidationError, validate_arguments
        def sanitize(f):
            def proxy(request):
                data = request.values.to_dict()
                try:
                    args, kwargs = validate_arguments(f, (request,), data)
                except ArgumentValidationError:
                    raise BadRequest('The browser failed to transmit all '
                                     'the data expected.')
                return f(*args, **kwargs)
            return proxy
    :param func: the function the validation is performed against.
    :param args: a tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :param drop_extra: set to `False` if you don't want extra arguments
                       to be silently dropped.
    :return: tuple in the form ``(args, kwargs)``.
    """
    parser = _parse_signature(func)
    # the parser returns more fields; only the first five matter here
    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    elif (extra or extra_positional) and not drop_extra:
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
    """Bind the arguments provided into a dict.  When passed a function,
    a tuple of arguments and a dict of keyword arguments `bind_arguments`
    returns a dict of names as the function would see it.  This can be useful
    to implement a cache decorator that uses the function arguments to build
    the cache key based on the values of the arguments.
    :param func: the function the arguments should be bound for.
    :param args: tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :return: a :class:`dict` of bound keyword arguments.
    """
    args, kwargs, missing, extra, extra_positional, \
        arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
    values = {}
    for (name, has_default, default), value in zip(arg_spec, args):
        values[name] = value
    if vararg_var is not None:
        # the function has a *args parameter: surplus positionals go there
        values[vararg_var] = tuple(extra_positional)
    elif extra_positional:
        raise TypeError('too many positional arguments')
    if kwarg_var is not None:
        # reject keywords that also arrived positionally before stashing
        # the remainder into the **kwargs parameter
        multikw = set(extra) & set([x[0] for x in arg_spec])
        if multikw:
            raise TypeError('got multiple values for keyword argument ' +
                            repr(next(iter(multikw))))
        values[kwarg_var] = extra
    elif extra:
        raise TypeError('got unexpected keyword argument ' +
                        repr(next(iter(extra))))
    return values
class ArgumentValidationError(ValueError):
    """Raised if :func:`validate_arguments` fails to validate"""

    def __init__(self, missing=None, extra=None, extra_positional=None):
        # normalize each category to an empty container when not given
        self.missing = set(missing or ())
        self.extra = extra or {}
        self.extra_positional = extra_positional or []
        message = 'function arguments invalid. (%d missing, %d additional)' % (
            len(self.missing),
            len(self.extra) + len(self.extra_positional),
        )
        ValueError.__init__(self, message)
class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""
    #: String in dotted notation that failed to be imported.
    import_name = None
    #: Wrapped exception.
    exception = None
    def __init__(self, import_name, exception):
        self.import_name = import_name
        self.exception = exception
        msg = (
            'import_string() failed for %r. Possible reasons are:\n\n'
            '- missing __init__.py in a package;\n'
            '- package or module path not included in sys.path;\n'
            '- duplicated package or module name taking precedence in '
            'sys.path;\n'
            '- missing module, class, function or variable;\n\n'
            'Debugged import:\n\n%s\n\n'
            'Original exception:\n\n%s: %s')
        name = ''
        tracked = []
        # Import successively longer dotted prefixes of the name to locate
        # the first component that fails; each successful prefix is recorded
        # together with the file it resolved to.
        for part in import_name.replace(':', '.').split('.'):
            name += (name and '.') + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, '__file__', None)))
            else:
                # First component that does not import: format the report
                # and stop.  NOTE(review): if every prefix imported, the
                # loop would end without formatting ``msg`` and the raw
                # template would become the message — presumably unreachable
                # because this error is only raised after an import failed;
                # confirm at the call sites.
                track = ['- %r found in %r.' % (n, i) for n, i in tracked]
                track.append('- %r not found.' % name)
                msg = msg % (import_name, '\n'.join(track),
                             exception.__class__.__name__, str(exception))
                break
        ImportError.__init__(self, msg)
    def __repr__(self):
        return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
                                 self.exception)
# DEPRECATED
# these objects were previously in this module as well. we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import ( # noqa
MultiDict, CombinedMultiDict, Headers, EnvironHeaders)
from werkzeug.http import parse_cookie, dump_cookie # noqa
|
aymara/verbenet-editor | refs/heads/master | syntacticframes_project/syntacticframes/management/commands/vinf.py | 1 | import sys
import traceback
import logging
from time import gmtime, strftime
from django.core.management.base import BaseCommand
from django.db import transaction
from syntacticframes.models import VerbNetClass, VerbTranslation, VerbNetMember
class Command(BaseCommand):
    """One-off data command: rename the 'Vinf' marker to 'V-inf' in the
    ``roles_syntax`` and ``syntax`` fields of every VerbNet frame.

    All updates run inside a single transaction, so on any error every
    change is rolled back and the failure is logged.
    """

    def handle(self, *args, **options):
        verb_logger = logging.getLogger('verbs')
        when = strftime("%d/%m/%Y %H:%M:%S", gmtime())
        verb_logger.info("{}: Started Vinf update.".format(when))
        try:
            with transaction.atomic():
                for vn_class in VerbNetClass.objects.all():
                    for frameset in vn_class.verbnetframeset_set.all():
                        for frame in frameset.verbnetframe_set.all():
                            if 'Vinf' in frame.roles_syntax or 'Vinf' in frame.syntax:
                                frame.roles_syntax = frame.roles_syntax.replace('Vinf', 'V-inf')
                                frame.syntax = frame.syntax.replace('Vinf', 'V-inf')
                                frame.save()
                                when = strftime("%d/%m/%Y %H:%M:%S", gmtime())
                                verb_logger.info("{}: {}, {}: {}".format(when, vn_class.name, frameset.name, frame))
        except Exception:
            # The original bare ``except:`` also swallowed KeyboardInterrupt
            # and SystemExit; only genuine errors should be caught and
            # logged here (transaction.atomic has already rolled back).
            when = strftime("%d/%m/%Y %H:%M:%S", gmtime())
            # format_exc() produces the same text as joining
            # traceback.format_exception(*sys.exc_info()).
            verb_logger.info(traceback.format_exc())
            verb_logger.info("{}: Exception, everything was backed out.".format(when))
        else:
            when = strftime("%d/%m/%Y %H:%M:%S", gmtime())
            verb_logger.info("{}: Ended Vinf update.".format(when))
            verb_logger.info('')
|
ray-project/ray | refs/heads/master | doc/kubernetes/example_scripts/run_on_head.py | 1 | from collections import Counter
import sys
import time
import ray
# Run this script on the Ray head node using kubectl exec.
@ray.remote
def gethostname(x):
    """Remote task: return *x* with this worker's hostname appended.

    ``x`` must support tuple concatenation (callers pass tuples).  The
    imports live inside the body because the function executes on remote
    worker processes.
    """
    import platform
    import time
    # Small delay — presumably so the 100 tasks spread across several
    # nodes instead of all finishing on one; TODO confirm.
    time.sleep(0.01)
    return x + (platform.node(), )
def wait_for_nodes(expected):
    """Block until at least ``expected`` Ray nodes have joined the cluster.

    Polls ``ray.cluster_resources()`` once per second, counting resource
    keys that contain the substring "node".
    """
    while True:
        cluster = ray.cluster_resources()
        joined = sum(amount for key, amount in cluster.items()
                     if "node" in key)
        if joined >= expected:
            break
        print("{} nodes have joined so far, waiting for {} more.".format(
            joined, expected - joined))
        sys.stdout.flush()
        time.sleep(1)
def main():
    """Smoke-test object transfer between all nodes of a 3-node cluster."""
    wait_for_nodes(3)

    # Check that objects can be transferred from each node to each other
    # node: each result tuple carries the two hostnames it passed through.
    for iteration in range(10):
        print("Iteration {}".format(iteration))
        futures = [gethostname.remote(gethostname.remote(()))
                   for _ in range(100)]
        print(Counter(ray.get(futures)))
        sys.stdout.flush()

    print("Success!")
    sys.stdout.flush()


if __name__ == "__main__":
    ray.init(address="auto")
    main()
|
s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""

# Build a ROT13 character map: each ASCII letter maps to the letter 13
# positions further along the alphabet, wrapping within its own case.
d = {}
for base in (65, 97):  # ord('A'), ord('a')
    for offset in range(26):
        d[chr(base + offset)] = chr((offset + 13) % 26 + base)

# Decode and print; characters without a mapping pass through unchanged.
print("".join(d.get(ch, ch) for ch in s))
|
lmorchard/django | refs/heads/master | tests/template_tests/filter_tests/test_dictsort.py | 342 | from django.template.defaultfilters import dictsort
from django.test import SimpleTestCase
class FunctionTests(SimpleTestCase):

    def test_sort(self):
        people = [
            {'age': 23, 'name': 'Barbara-Ann'},
            {'age': 63, 'name': 'Ra Ra Rasputin'},
            {'name': 'Jonny B Goode', 'age': 18},
        ]
        ordered = dictsort(people, 'age')
        # Avoid shadowing the builtin ``dict`` in the comprehension.
        self.assertEqual(
            [sorted(d.items()) for d in ordered],
            [[('age', 18), ('name', 'Jonny B Goode')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 63), ('name', 'Ra Ra Rasputin')]],
        )

    def test_dictsort_complex_sorting_key(self):
        """
        Since dictsort uses template.Variable under the hood, it can sort
        on keys like 'foo.bar'.
        """
        data = [
            {'foo': {'bar': 1, 'baz': 'c'}},
            {'foo': {'bar': 2, 'baz': 'b'}},
            {'foo': {'bar': 3, 'baz': 'a'}},
        ]
        self.assertEqual(
            [d['foo']['bar'] for d in dictsort(data, 'foo.baz')],
            [3, 2, 1],
        )

    def test_invalid_values(self):
        """
        If dictsort is passed something other than a list of dictionaries,
        fail silently.
        """
        for bad_input in ([1, 2, 3], 'Hello!', {'a': 1}, 1):
            self.assertEqual(dictsort(bad_input, 'age'), '')
|
mskvortsov/coreclr | refs/heads/master | src/scripts/genXplatEventing.py | 25 | #
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
#
#USAGE:
#Add Events: modify <root>src/vm/ClrEtwAll.man
#Look at the Code in <root>/src/scripts/genXplatLttng.py for using subroutines in this file
#
# Python 2 compatibility
from __future__ import print_function
import os
import xml.dom.minidom as DOM
stdprolog="""
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
******************************************************************/
"""
stdprolog_cmake="""
#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genXplatEventing.py
#******************************************************************
"""
# One indentation level used when emitting generated C++ source.
lindent = "    "

# ETW manifest type -> C/C++ parameter type emitted in generated PAL
# prototypes.  "win:count" maps to "*" so array/pointer parameters pick up
# a pointer suffix; "win:null" maps to a bare space (scalar, no suffix).
palDataTypeMapping = {
    # constructed types
    "win:null":          " ",
    "win:Int64":         "const __int64",
    "win:ULong":         "const ULONG",
    "win:count":         "*",
    "win:Struct":        "const void",
    # actual spec
    "win:GUID":          "const GUID",
    "win:AnsiString":    "LPCSTR",
    "win:UnicodeString": "PCWSTR",
    "win:Double":        "const double",
    "win:Int32":         "const signed int",
    "win:Boolean":       "const BOOL",
    "win:UInt64":        "const unsigned __int64",
    "win:UInt32":        "const unsigned int",
    "win:UInt16":        "const unsigned short",
    "win:UInt8":         "const unsigned char",
    "win:Pointer":       "const void*",
    "win:Binary":        "const BYTE",
}
# A Template represents an ETW template can contain 1 or more AbstractTemplates
# The AbstractTemplate contains FunctionSignature
# FunctionSignature consist of FunctionParameter representing each parameter in it's signature
# Byte sizes of the fixed-width ETW payload types.  BUG FIX: the original
# only matched the bare spelling "GUID", but manifest winTypes use
# "win:GUID" (see palDataTypeMapping / FunctionParameter), so a GUID
# parameter raised "Don't know size" when estimating.  Both spellings are
# accepted for backward compatibility.
_FIXED_PARAM_SIZES = {
    "win:Int64": 8,
    "win:ULong": 4,
    "GUID": 16,
    "win:GUID": 16,
    "win:Double": 8,
    "win:Int32": 4,
    "win:Boolean": 4,
    "win:UInt64": 8,
    "win:UInt32": 4,
    "win:UInt16": 2,
    "win:UInt8": 1,
    "win:Binary": 1,
}

# Rough guesses for variable-length types, used only in estimate mode.
_ESTIMATED_PARAM_SIZES = {
    "win:AnsiString": 32,
    "win:UnicodeString": 64,
    "win:Struct": 32,
}


def getParamSequenceSize(paramSequence, estimate):
    """Compute the payload size in bytes of a sequence of ETW winTypes.

    :param paramSequence: iterable of winType strings (e.g. "win:Int32").
    :param estimate: when True, variable-length types are counted with
        rough guesses, pointers count as 8 bytes, and the return value is
        a single int; when False, variable-length types are skipped and
        the return value is a ``(fixed_bytes, pointer_count)`` tuple.
    :raises Exception: in estimate mode, for a type with no known size.
    """
    total = 0
    pointers = 0
    for param in paramSequence:
        if param in _FIXED_PARAM_SIZES:
            total += _FIXED_PARAM_SIZES[param]
        elif param == "win:Pointer":
            if estimate:
                total += 8
            else:
                pointers += 1
        elif estimate:
            if param in _ESTIMATED_PARAM_SIZES:
                total += _ESTIMATED_PARAM_SIZES[param]
            else:
                raise Exception("Don't know size for " + param)
    if estimate:
        return total
    return total, pointers
class Template:
    """An ETW template: the ordered payload signature of an event.

    Wraps a :class:`FunctionSignature` built from the prototype parameters
    and their dependencies (count variables precede the arrays that use
    them), plus bookkeeping for struct fields and array fields.
    """

    def __init__(self, templateName, fnPrototypes, dependencies, structSizes, arrays):
        self.name = templateName
        self.signature = FunctionSignature()
        self.structs = structSizes
        self.arrays = arrays
        # Insert each parameter's dependencies first, skipping duplicates.
        for variable in fnPrototypes.paramlist:
            for dependency in dependencies[variable]:
                if not self.signature.getParam(dependency):
                    self.signature.append(
                        dependency, fnPrototypes.getParam(dependency))

    def __repr__(self):
        return "<Template " + self.name + ">"

    def getFnParam(self, name):
        """Return the FunctionParameter registered under *name* (or None)."""
        return self.signature.getParam(name)

    @property
    def num_params(self):
        return len(self.signature.paramlist)

    @property
    def estimated_size(self):
        """Estimated payload size in bytes, clamped to [32, 1024]."""
        winTypes = (self.getFnParam(name).winType
                    for name in self.signature.paramlist)
        total = getParamSequenceSize(winTypes, True)
        return min(max(total, 32), 1024)
class FunctionSignature:
    """Ordered collection of parameters, keyed by name.

    ``LUT`` maps a parameter name to its value; ``paramlist`` preserves
    the declaration order for signature generation.
    """

    def __init__(self):
        self.LUT = {}        # name -> parameter object
        self.paramlist = []  # names in declaration order

    def __repr__(self):
        return ", ".join(self.paramlist)

    def append(self, variable, fnparam):
        """Register *fnparam* under *variable*, keeping insertion order."""
        self.LUT[variable] = fnparam
        self.paramlist.append(variable)

    def getParam(self, variable):
        """Return the parameter registered under *variable*, or None."""
        return self.LUT.get(variable)

    def getLength(self):
        return len(self.paramlist)
class FunctionParameter:
    """One event payload field as declared in the ETW manifest."""

    def __init__(self, winType, name, count, prop):
        self.winType = winType  # ETW type as given in the manifest
        self.name = name        # parameter name as given in the manifest
        self.prop = prop        # any special property (e.g. count variable)
        # ``count`` classifies the field: "win:count" marks pointer/array
        # style fields — anything with a truthy count attribute, plus GUIDs
        # which are treated as structs — while scalars get "win:null".
        if count != "win:null" and (count or winType == "win:GUID"):
            self.count = "win:count"
        else:
            self.count = "win:null"

    def __repr__(self):
        return self.name
def getTopLevelElementsByTagName(node, tag):
    """Return only the direct children of *node* with tag name *tag*.

    The DOM's ``getElementsByTagName`` matches descendants at any depth;
    this filters the result down to immediate children.
    """
    return [element for element in node.getElementsByTagName(tag)
            if element.parentNode == node]
# <data> attributes we knowingly ignore vs. the ones we consume; anything
# else in the manifest is treated as an error (see validation loop below).
ignoredXmlTemplateAttribes = frozenset(["map","outType"])
usedXmlTemplateAttribes    = frozenset(["name","inType","count", "length"])

def parseTemplateNodes(templateNodes):
    """Build a {template id -> Template} dict from manifest <template> nodes.

    For each template this collects the ordered payload fields (<data>),
    records which fields are arrays (and which field holds their count),
    and registers <struct> fields as "win:Struct" parameters.

    :param templateNodes: list of <template> DOM elements.
    :return: dict mapping template 'tid' to :class:`Template`.
    :raises ValueError: on an unrecognised <data> attribute, or a <struct>
        with no count attribute.
    :raises Exception: if a <data> node has both count and length.
    """
    #return values
    allTemplates           = {}

    for templateNode in templateNodes:
        structCounts = {}
        arrays = {}
        templateName    = templateNode.getAttribute('tid')
        var_Dependecies = {}
        fnPrototypes    = FunctionSignature()
        dataNodes       = getTopLevelElementsByTagName(templateNode,'data')

        # Validate that no new attributes has been added to manifest
        for dataNode in dataNodes:
            nodeMap = dataNode.attributes
            for attrib in nodeMap.values():
                attrib_name = attrib.name
                if attrib_name not in ignoredXmlTemplateAttribes and attrib_name not in usedXmlTemplateAttribes:
                    raise ValueError('unknown attribute: '+ attrib_name + ' in template:'+ templateName)

        for dataNode in dataNodes:
            variable = dataNode.getAttribute('name')
            wintype = dataNode.getAttribute('inType')

            #count and length are the same
            wincount = dataNode.getAttribute('count')
            winlength = dataNode.getAttribute('length');

            var_Props = None
            var_dependency = [variable]
            if  winlength:
                if wincount:
                    raise Exception("both count and length property found on: " + variable + "in template: " + templateName)
                wincount = winlength

            # A literal count of 1 means "scalar" — treat as no count.
            if (wincount.isdigit() and int(wincount) ==1):
                wincount = ''

            if  wincount:
                if (wincount.isdigit()):
                    var_Props = wincount
                elif  fnPrototypes.getParam(wincount):
                    # Count is another field's name: that field must be
                    # marshalled before this array, hence the dependency.
                    var_Props = wincount
                    var_dependency.insert(0, wincount)
                    arrays[variable] = wincount

            #construct the function signature
            if  wintype == "win:GUID":
                var_Props = "sizeof(GUID)/sizeof(int)"

            var_Dependecies[variable] = var_dependency
            fnparam        = FunctionParameter(wintype,variable,wincount,var_Props)
            fnPrototypes.append(variable,fnparam)

        structNodes = getTopLevelElementsByTagName(templateNode,'struct')
        for structToBeMarshalled in structNodes:
            structName   = structToBeMarshalled.getAttribute('name')
            countVarName = structToBeMarshalled.getAttribute('count')

            # NOTE(review): the assert hard-codes "Count" as the only
            # supported struct count variable, which makes the following
            # ``if not countVarName`` check unreachable.
            assert(countVarName == "Count")
            assert(countVarName in fnPrototypes.paramlist)

            if not countVarName:
                raise ValueError("Struct '%s' in template '%s' does not have an attribute count." % (structName, templateName))

            names = [x.attributes['name'].value for x in structToBeMarshalled.getElementsByTagName("data")]
            types = [x.attributes['inType'].value for x in structToBeMarshalled.getElementsByTagName("data")]

            structCounts[structName] = countVarName
            var_Dependecies[structName] = [countVarName, structName]
            fnparam_pointer = FunctionParameter("win:Struct", structName, "win:count", countVarName)
            fnPrototypes.append(structName, fnparam_pointer)

        allTemplates[templateName] = Template(templateName, fnPrototypes, var_Dependecies, structCounts, arrays)

    return allTemplates
def generateClrallEvents(eventNodes,allTemplates):
    """Emit the C++ text for clretwallmain.h: for each manifest event, an
    inline EventEnabled<Name>() predicate plus a FireEtw<Name>(...) wrapper
    that writes to EventPipe and, when Xplat logging is on, to the Xplat
    provider as well.  Returns the generated source as one string."""
    clrallEvents = []
    for eventNode in eventNodes:
        eventName    = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        #generate EventEnabled
        clrallEvents.append("inline BOOL EventEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("() {return ")
        clrallEvents.append("EventPipeEventEnabled" + eventName + "() || ")
        clrallEvents.append("(XplatEventLogger::IsEventLoggingEnabled() && EventXplatEnabled")
        clrallEvents.append(eventName+"());}\n\n")

        #generate FireEtw functions
        fnptype     = []
        fnbody      = []
        fnptype.append("inline ULONG FireEtw")
        fnptype.append(eventName)
        fnptype.append("(\n")

        line        = []
        fnptypeline = []

        if templateName:
            template = allTemplates[templateName]
            fnSig = template.signature

            # Build the parameter list; struct parameters get an extra
            # leading <name>_ElementSize int.
            for params in fnSig.paramlist:
                fnparam     = fnSig.getParam(params)
                wintypeName = fnparam.winType
                typewName   = palDataTypeMapping[wintypeName]
                winCount    = fnparam.count
                countw      = palDataTypeMapping[winCount]

                if params in template.structs:
                    fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))

                fnptypeline.append(lindent)
                fnptypeline.append(typewName)
                fnptypeline.append(countw)
                fnptypeline.append(" ")
                fnptypeline.append(fnparam.name)
                fnptypeline.append(",\n")

            #fnsignature
            for params in fnSig.paramlist:
                fnparam     = fnSig.getParam(params)

                if params in template.structs:
                    line.append(fnparam.name + "_ElementSize")
                    line.append(", ")

                line.append(fnparam.name)
                line.append(",")

            #remove trailing commas
            if len(line) > 0:
                del line[-1]
            if len(fnptypeline) > 0:
                del fnptypeline[-1]

        fnptype.extend(fnptypeline)
        fnptype.append("\n)\n{\n")
        fnbody.append(lindent)
        fnbody.append("ULONG status = EventPipeWriteEvent" + eventName + "(" + ''.join(line) + ");\n")
        fnbody.append(lindent)
        fnbody.append("if(XplatEventLogger::IsEventLoggingEnabled())\n")
        fnbody.append(lindent)
        fnbody.append("{\n")
        fnbody.append(lindent)
        fnbody.append(lindent)
        fnbody.append("status &= FireEtXplat")
        fnbody.append(eventName)
        fnbody.append("(")
        fnbody.extend(line)
        fnbody.append(");\n")
        fnbody.append(lindent)
        fnbody.append("}\n")
        fnbody.append(lindent)
        fnbody.append("return status;\n")
        fnbody.append("}\n\n")

        clrallEvents.extend(fnptype)
        clrallEvents.extend(fnbody)

    return ''.join(clrallEvents)
def generateClrXplatEvents(eventNodes, allTemplates):
    """Emit the C++ text for clrxplatevents.h: extern "C" declarations of
    EventXplatEnabled<Name>() and FireEtXplat<Name>(...) for every manifest
    event.  Returns the generated source as one string."""
    clrallEvents = []
    for eventNode in eventNodes:
        eventName    = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        #generate EventEnabled
        clrallEvents.append("extern \"C\" BOOL EventXplatEnabled")
        clrallEvents.append(eventName)
        clrallEvents.append("();\n")

        #generate FireEtw functions
        fnptype     = []
        fnptypeline = []
        fnptype.append("extern \"C\" ULONG FireEtXplat")
        fnptype.append(eventName)
        fnptype.append("(\n")

        if templateName:
            template = allTemplates[templateName]
            fnSig = template.signature

            # Struct parameters get an extra leading <name>_ElementSize int.
            for params in fnSig.paramlist:
                fnparam     = fnSig.getParam(params)
                wintypeName = fnparam.winType
                typewName   = palDataTypeMapping[wintypeName]
                winCount    = fnparam.count
                countw      = palDataTypeMapping[winCount]

                if params in template.structs:
                    fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))

                fnptypeline.append(lindent)
                fnptypeline.append(typewName)
                fnptypeline.append(countw)
                fnptypeline.append(" ")
                fnptypeline.append(fnparam.name)
                fnptypeline.append(",\n")

            #remove trailing commas
            if len(fnptypeline) > 0:
                del fnptypeline[-1]

        fnptype.extend(fnptypeline)
        fnptype.append("\n);\n")
        clrallEvents.extend(fnptype)

    return ''.join(clrallEvents)
def generateClrEventPipeWriteEvents(eventNodes, allTemplates):
    """Emit the C++ text for clreventpipewriteevents.h: extern "C"
    declarations of EventPipeEventEnabled<Name>() and
    EventPipeWriteEvent<Name>(...) for every manifest event.  Returns the
    generated source as one string."""
    clrallEvents = []
    for eventNode in eventNodes:
        eventName    = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        #generate EventPipeEventEnabled and EventPipeWriteEvent functions
        eventenabled = []
        writeevent   = []
        fnptypeline  = []

        eventenabled.append("extern \"C\" bool EventPipeEventEnabled")
        eventenabled.append(eventName)
        eventenabled.append("();\n")

        writeevent.append("extern \"C\" ULONG EventPipeWriteEvent")
        writeevent.append(eventName)
        writeevent.append("(\n")

        if templateName:
            template = allTemplates[templateName]
            fnSig = template.signature

            # Struct parameters get an extra leading <name>_ElementSize int.
            for params in fnSig.paramlist:
                fnparam     = fnSig.getParam(params)
                wintypeName = fnparam.winType
                typewName   = palDataTypeMapping[wintypeName]
                winCount    = fnparam.count
                countw      = palDataTypeMapping[winCount]

                if params in template.structs:
                    fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))

                fnptypeline.append(lindent)
                fnptypeline.append(typewName)
                fnptypeline.append(countw)
                fnptypeline.append(" ")
                fnptypeline.append(fnparam.name)
                fnptypeline.append(",\n")

            #remove trailing commas
            if len(fnptypeline) > 0:
                del fnptypeline[-1]

        writeevent.extend(fnptypeline)
        writeevent.append("\n);\n")
        clrallEvents.extend(eventenabled)
        clrallEvents.extend(writeevent)

    return ''.join(clrallEvents)
# Generates the dummy header which the VM uses as its entry point to the
# logging functions when real event tracing is compiled out.
def generateclrEtwDummy(eventNodes,allTemplates):
    """Emit ``#define FireEtw<Name>(args...) 0`` stubs for every manifest
    event, so call sites compile to no-ops.  Returns the generated text."""
    clretmEvents = []
    for eventNode in eventNodes:
        eventName    = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')

        fnptype     = []
        #generate FireEtw functions
        fnptype.append("#define FireEtw")
        fnptype.append(eventName)
        fnptype.append("(");
        line        = []
        if templateName:
            template = allTemplates[templateName]
            fnSig = template.signature

            # Macro parameter list must mirror the real FireEtw signature,
            # including the extra _ElementSize argument for structs.
            for params in fnSig.paramlist:
                fnparam     = fnSig.getParam(params)

                if params in template.structs:
                    line.append(fnparam.name + "_ElementSize")
                    line.append(", ")

                line.append(fnparam.name)
                line.append(", ")

            #remove trailing commas
            if len(line) > 0:
                del line[-1]

        fnptype.extend(line)
        fnptype.append(") 0\n")
        clretmEvents.extend(fnptype)

    return ''.join(clretmEvents)
def generateClralltestEvents(sClrEtwAllMan):
    """Emit the body of the sanity test: one EventXplatEnabled/FireEtXplat
    call per manifest event, with canned arguments chosen per winType (see
    the win_* globals written by generateSanityTest).  Returns the C++
    statements as one string."""
    tree           = DOM.parse(sClrEtwAllMan)

    clrtestEvents = []
    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates  = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        for eventNode in eventNodes:
            eventName    = eventNode.getAttribute('symbol')
            templateName = eventNode.getAttribute('template')
            clrtestEvents.append(" EventXplatEnabled" + eventName + "();\n")
            clrtestEvents.append("Error |= FireEtXplat" + eventName + "(\n")

            line =[]
            if templateName :
                template = allTemplates[templateName]
                fnSig = template.signature

                for params in fnSig.paramlist:
                    if params in template.structs:
                        line.append("sizeof(Struct1),\n")

                    argline =''
                    fnparam  = fnSig.getParam(params)
                    # Count fields get the literal 2 (matching the two-entry
                    # Struct1 array in the test fixture).
                    if fnparam.name.lower() == 'count':
                        argline = '2'
                    else:
                        if fnparam.winType == "win:Binary":
                            argline = 'win_Binary'
                        elif fnparam.winType == "win:Pointer" and fnparam.count == "win:count":
                            argline = "(const void**)&var11"
                        elif fnparam.winType == "win:Pointer" :
                            argline = "(const void*)var11"
                        elif fnparam.winType =="win:AnsiString":
                            argline    = '" Testing AniString "'
                        elif fnparam.winType =="win:UnicodeString":
                            argline    = 'W(" Testing UnicodeString ")'
                        else:
                            if fnparam.count == "win:count":
                                line.append("&")
                            # Default: use the global named after the type,
                            # e.g. win:Int32 -> win_Int32.
                            argline = fnparam.winType.replace(":","_")

                    line.append(argline)
                    line.append(",\n")

                #remove trailing commas
                if len(line) > 0:
                    del line[-1]
                    line.append("\n")
            line.append(");\n")
            clrtestEvents.extend(line)

    return ''.join(clrtestEvents)
def generateSanityTest(sClrEtwAllMan,testDir):
    """Generate the PAL sanity-test assets under *testDir*: a CMakeLists.txt,
    a testinfo.dat and clralltestevents.cpp that fires every Xplat event
    once.  No-op when *testDir* is falsy."""

    if not testDir:
        return
    print('Generating Event Logging Tests')

    if not os.path.exists(testDir):
        os.makedirs(testDir)

    cmake_file = testDir + "/CMakeLists.txt"
    test_cpp   = "clralltestevents.cpp"
    testinfo   = testDir + "/testinfo.dat"

    Cmake_file = open(cmake_file,'w')
    Test_cpp   = open(testDir + "/" + test_cpp,'w')
    Testinfo   = open(testinfo,'w')

    #CMake File:
    Cmake_file.write(stdprolog_cmake)
    Cmake_file.write("""
cmake_minimum_required(VERSION 2.8.12.2)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(SOURCES
""")
    Cmake_file.write(test_cpp)
    Cmake_file.write("""
    )
include_directories(${GENERATED_INCLUDE_DIR})
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)

add_executable(eventprovidertest
                  ${SOURCES}
               )
set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OTPTIONS "")
if(FEATURE_EVENT_TRACE)
        add_definitions(-DFEATURE_EVENT_TRACE=1)
        list(APPEND EVENT_PROVIDER_DEPENDENCIES
             coreclrtraceptprovider
             eventprovider
        )
        list(APPEND EVENT_PROVIDER_LINKER_OTPTIONS
             ${EVENT_PROVIDER_DEPENDENCIES}
        )

endif(FEATURE_EVENT_TRACE)

add_dependencies(eventprovidertest ${EVENT_PROVIDER_DEPENDENCIES} coreclrpal)

target_link_libraries(eventprovidertest
                      coreclrpal
                      ${EVENT_PROVIDER_LINKER_OTPTIONS}
                      )
""")
    Testinfo.write("""
 Copyright (c) Microsoft Corporation.  All rights reserved.
#

Version = 1.0
Section = EventProvider
Function = EventProvider
Name = PAL test for FireEtW* and EventEnabled* functions
TYPE = DEFAULT
EXE1 = eventprovidertest
Description
=This is a sanity test to check that there are no crashes in Xplat eventing
""")

    #Test.cpp — fixture globals (win_*) feed generateClralltestEvents above.
    Test_cpp.write(stdprolog)
    Test_cpp.write("""
/*=====================================================================
**
** Source:   clralltestevents.cpp
**
** Purpose:  Ensure Correctness of Eventing code
**
**
**===================================================================*/
#include <palsuite.h>
#include <clrxplatevents.h>

typedef struct _Struct1 {
                ULONG   Data1;
                unsigned short Data2;
                unsigned short Data3;
                unsigned char  Data4[8];
} Struct1;

Struct1 var21[2] = { { 245, 13, 14, "deadbea" }, { 542, 0, 14, "deadflu" } };

Struct1* var11 = var21;
Struct1* win_Struct = var21;

GUID myGuid;
GUID* win_GUID = &myGuid;
double win_Double = 34.04;
ULONG win_ULong = 34;
BOOL win_Boolean = FALSE;
unsigned __int64 win_UInt64 = 114;
unsigned int win_UInt32 = 4;
unsigned short win_UInt16 = 12;
unsigned char win_UInt8 = 9;
int win_Int32 = 12;
BYTE* win_Binary = (BYTE*)var21;

int __cdecl main(int argc, char **argv)
{

            /* Initialize the PAL.
            */

            if(0 != PAL_Initialize(argc, argv))
            {
            return FAIL;
            }

            ULONG Error = ERROR_SUCCESS;
#if defined(FEATURE_EVENT_TRACE)
            Trace("\\n Starting functional  eventing APIs tests  \\n");
""")

    Test_cpp.write(generateClralltestEvents(sClrEtwAllMan))
    Test_cpp.write("""

/* Shutdown the PAL.
*/

         if (Error != ERROR_SUCCESS)
         {
             Fail("One or more eventing Apis failed\\n ");
             return FAIL;
          }
          Trace("\\n All eventing APIs were fired succesfully \\n");
#endif //defined(FEATURE_EVENT_TRACE)
          PAL_Terminate();
          return PASS;
                                 }

""")
    Cmake_file.close()
    Test_cpp.close()
    Testinfo.close()
def generateEtmDummyHeader(sClrEtwAllMan,clretwdummy):
    """Write the dummy FireEtw* #define header (etmdummy.h) for every
    provider in the manifest.  No-op when *clretwdummy* is falsy."""

    if not clretwdummy:
        return

    print(' Generating Dummy Event Headers')
    tree           = DOM.parse(sClrEtwAllMan)
    incDir         = os.path.dirname(os.path.realpath(clretwdummy))
    if not os.path.exists(incDir):
        os.makedirs(incDir)

    Clretwdummy    = open(clretwdummy,'w')
    Clretwdummy.write(stdprolog + "\n")

    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates  = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #pal: create etmdummy.h
        Clretwdummy.write(generateclrEtwDummy(eventNodes, allTemplates) + "\n")

    Clretwdummy.close()
# NOTE(review): "Plform" is a long-standing typo for "Platform"; the name is
# part of the public surface of this script, so it is kept as-is.
def generatePlformIndependentFiles(sClrEtwAllMan,incDir,etmDummyFile):
    """Generate the platform-independent headers (clretwallmain.h,
    clrxplatevents.h, clreventpipewriteevents.h) into *incDir*, plus the
    dummy header *etmDummyFile*.  Header generation is skipped when
    *incDir* is falsy."""

    generateEtmDummyHeader(sClrEtwAllMan,etmDummyFile)
    tree           = DOM.parse(sClrEtwAllMan)

    if not incDir:
        return

    print(' Generating Event Headers')
    if not os.path.exists(incDir):
        os.makedirs(incDir)

    clrallevents           = incDir + "/clretwallmain.h"
    clrxplatevents         = incDir + "/clrxplatevents.h"
    clreventpipewriteevents = incDir + "/clreventpipewriteevents.h"

    Clrallevents           = open(clrallevents,'w')
    Clrxplatevents         = open(clrxplatevents,'w')
    Clreventpipewriteevents = open(clreventpipewriteevents,'w')

    Clrallevents.write(stdprolog + "\n")
    Clrxplatevents.write(stdprolog + "\n")
    Clreventpipewriteevents.write(stdprolog + "\n")

    # clretwallmain.h includes the other two generated headers.
    Clrallevents.write("\n#include \"clrxplatevents.h\"\n")
    Clrallevents.write("#include \"clreventpipewriteevents.h\"\n\n")

    for providerNode in tree.getElementsByTagName('provider'):
        templateNodes = providerNode.getElementsByTagName('template')
        allTemplates  = parseTemplateNodes(templateNodes)
        eventNodes = providerNode.getElementsByTagName('event')
        #vm header:
        Clrallevents.write(generateClrallEvents(eventNodes, allTemplates) + "\n")

        #pal: create clrallevents.h
        Clrxplatevents.write(generateClrXplatEvents(eventNodes, allTemplates) + "\n")

        #eventpipe: create clreventpipewriteevents.h
        Clreventpipewriteevents.write(generateClrEventPipeWriteEvents(eventNodes, allTemplates) + "\n")

    Clrxplatevents.close()
    Clrallevents.close()
    Clreventpipewriteevents.close()
class EventExclusions:
    """The three entry categories parsed from the exclusion list file.

    Each attribute is a set of "provider:task:symbol" strings (with "*"
    wildcards), filled in by :func:`parseExclusionList`.
    """

    def __init__(self):
        self.nostack = set()        # 'nostack' entries
        self.explicitstack = set()  # 'stack' entries
        self.noclrinstance = set()  # 'noclrinstanceid' entries
def parseExclusionList(exclusionListFile):
    """Parse the exclusion list file into an :class:`EventExclusions`.

    Lines are colon-separated; blank lines and '#' comments are skipped,
    as is any line containing a "nomac" token.  Token layout (presumably
    fixed by the file format — confirm against the checked-in list):
    kind:task:provider:...:symbol, where empty fields become "*" wildcards
    and kind selects the target set ('nostack', 'stack', 'noclrinstanceid').
    """
    exclusionInfo = EventExclusions()
    with open(exclusionListFile, 'r') as exclusion_file:
        for raw_line in exclusion_file:
            line = raw_line.strip()
            # skip blanks and comments
            if not line or line.startswith('#'):
                continue

            tokens = line.split(':')
            # entries carrying a "nomac" token are ignored
            if "nomac" in tokens:
                continue

            if len(tokens) > 5:
                raise Exception("Invalid Entry " + line + "in " + exclusionListFile)

            eventProvider = tokens[2] or "*"
            eventTask = tokens[1] or "*"
            eventSymbol = tokens[4] or "*"
            entry = eventProvider + ":" + eventTask + ":" + eventSymbol

            kind = tokens[0].lower()
            if kind == "nostack":
                exclusionInfo.nostack.add(entry)
            if kind == "stack":
                exclusionInfo.explicitstack.add(entry)
            if kind == "noclrinstanceid":
                exclusionInfo.noclrinstance.add(entry)
    return exclusionInfo
def getStackWalkBit(eventProvider, taskName, eventSymbol, stackSet):
    """Return False when the event matches any "provider:task:symbol"
    entry in *stackSet* (each component may be a "*" wildcard), else True.

    :raises Exception: on a malformed entry (not exactly three fields).
    """
    for entry in stackSet:
        tokens = entry.split(':')

        if len(tokens) != 3:
            raise Exception("Error,don't know how to process the entry " + entry)

        matched = (tokens[0] in (eventProvider, "*")
                   and tokens[1] in (taskName, "*")
                   and tokens[2] in (eventSymbol, "*"))
        if matched:
            return False
    return True
# Miscellaneous cross-checks between the manifest and the exclusion list.
def checkConsistency(sClrEtwAllMan,exclusionListFile):
    """Validate the manifest against the exclusion list.

    Two checks per event:
    1. Unless listed under 'noclrinstanceid', its template must carry a
       ClrInstanceID field of type win:UInt16.
    2. Stack-walk preferences (nostack/stack) must cover either ALL
       versions of an event value or NONE of them.
    Raises on the first violation.
    """
    tree                      = DOM.parse(sClrEtwAllMan)
    exclusionInfo = parseExclusionList(exclusionListFile)
    for providerNode in tree.getElementsByTagName('provider'):

        stackSupportSpecified = {}
        eventNodes            = providerNode.getElementsByTagName('event')
        templateNodes         = providerNode.getElementsByTagName('template')
        eventProvider         = providerNode.getAttribute('name')
        allTemplates          = parseTemplateNodes(templateNodes)

        for eventNode in eventNodes:
            taskName         = eventNode.getAttribute('task')
            eventSymbol      = eventNode.getAttribute('symbol')
            eventTemplate    = eventNode.getAttribute('template')
            eventValue       = int(eventNode.getAttribute('value'))
            # True means the event is NOT on the noclrinstanceid list,
            # i.e. it must carry a ClrInstanceID payload field.
            clrInstanceBit   = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.noclrinstance)
            sLookupFieldName = "ClrInstanceID"
            sLookupFieldType = "win:UInt16"

            if clrInstanceBit and allTemplates.get(eventTemplate):
                # check for the event template and look for a field named ClrInstanceId of type win:UInt16
                fnParam     = allTemplates[eventTemplate].getFnParam(sLookupFieldName)
                if not(fnParam and fnParam.winType == sLookupFieldType):
                    raise Exception(exclusionListFile + ":No " + sLookupFieldName + " field of type " + sLookupFieldType + " for event symbol " +  eventSymbol)

            # If some versions of an event are on the nostack/stack lists,
            # and some versions are not on either the nostack or stack list,
            # then developer likely forgot to specify one of the versions

            eventStackBitFromNoStackList       = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.nostack)
            eventStackBitFromExplicitStackList = getStackWalkBit(eventProvider, taskName, eventSymbol, exclusionInfo.explicitstack)
            sStackSpecificityError             = exclusionListFile + ": Error processing event :" + eventSymbol + "(ID" + str(eventValue) + "): This file must contain either ALL versions of this event or NO versions of this event. Currently some, but not all, versions of this event are present\n"

            if not stackSupportSpecified.get(eventValue):
                # Haven't checked this event before.  Remember whether a preference is stated
                if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
                    stackSupportSpecified[eventValue] = True
                else:
                    stackSupportSpecified[eventValue] = False
            else:
                # We've checked this event before.
                if stackSupportSpecified[eventValue]:
                    # When we last checked, a preference was previously specified, so it better be specified here
                    if eventStackBitFromNoStackList and eventStackBitFromExplicitStackList:
                        raise Exception(sStackSpecificityError)
                else:
                    # When we last checked, a preference was not previously specified, so it better not be specified here
                    if ( not eventStackBitFromNoStackList) or ( not eventStackBitFromExplicitStackList):
                        raise Exception(sStackSpecificityError)
import argparse
import sys
def main(argv):
    """Command-line entry point: validate the manifest against the
    exclusion list, then generate the platform-independent headers, the
    dummy header and (optionally) the sanity-test assets.

    :param argv: argument list (without the program name).
    :return: 0 on success, 1 when unknown arguments are supplied.
    """
    #parse the command line
    parser = argparse.ArgumentParser(description="Generates the Code required to instrument LTTtng logging mechanism")

    required = parser.add_argument_group('required arguments')
    required.add_argument('--man',  type=str, required=True,
                                    help='full path to manifest containig the description of events')
    required.add_argument('--exc',  type=str, required=True,
                                    help='full path to exclusion list')
    required.add_argument('--inc',  type=str, default=None,
                                    help='full path to directory where the header files will be generated')
    required.add_argument('--dummy',  type=str,default=None,
                                    help='full path to file that will have dummy definitions of FireEtw functions')
    required.add_argument('--testdir', type=str, default=None,
                                    help='full path to directory where the test assets will be deployed' )
    args, unknown = parser.parse_known_args(argv)
    if unknown:
        print('Unknown argument(s): ', ', '.join(unknown))
        # BUG FIX: this used to ``return const.UnknownArguments`` but no
        # ``const`` module is imported anywhere in this script, so the
        # error path crashed with a NameError instead of exiting cleanly.
        return 1

    sClrEtwAllMan     = args.man
    exclusionListFile = args.exc
    incdir            = args.inc
    etmDummyFile      = args.dummy
    testDir           = args.testdir

    checkConsistency(sClrEtwAllMan, exclusionListFile)
    generatePlformIndependentFiles(sClrEtwAllMan,incdir,etmDummyFile)
    generateSanityTest(sClrEtwAllMan,testDir)
    return 0
# Script entry point: forward the CLI arguments (minus the program name) to
# main() and propagate its return code to the shell.
if __name__ == '__main__':
    return_code = main(sys.argv[1:])
    sys.exit(return_code)
|
gpoulter/pydedupe | refs/heads/master | dedupe/linkcsv.py | 1 | """Helpers for record linkage with CSV files for input and output"""
import contextlib as ctx
import logging
import os
from os.path import join
import dedupe.csv as csv
import dedupe.group as group
import dedupe.sim as sim
LOG = logging.getLogger('dedupe.linkcsv')
def write_indices(indices, outdir, prefix):
    """Write indices in CSV format.

    One file is written per index, named ``<prefix><indexname>.csv``; each
    row holds the index key followed by the fields of one indexed record.

    :type indices: :class:`~indexer.Indices`
    :param indices: write one file per index in this dictionary.
    :type outdir: :class:`str`
    :param outdir: write index files to this directory.
    :type prefix: :class:`str`
    :param prefix: prepend this to each output file name.

    >>> from dedupe import linkcsv, csv, block, sim
    >>> makekey = lambda r: [int(r[1])]
    >>> compare = lambda x, y: float(int(x[1])==int(y[1]))
    >>> records = [('A', 5.5), ('C', 5.25)]
    >>> indexstrategy = [ ("Idx", block.Index, makekey) ]
    >>> indices = sim.Indices(indexstrategy, records)
    >>> streams = csv._fake_open(linkcsv)
    >>> linkcsv.write_indices(indices, outdir="/tmp", prefix="foo-")
    >>> stream = streams["/tmp/foo-Idx.csv"]
    >>> stream.seek(0)
    >>> list(csv.Reader(stream, fields='Idx V1 V2'))
    [Row(Idx=u'5', V1=u'A', V2=u'5.5'), Row(Idx=u'5', V1=u'C', V2=u'5.25')]
    """
    def write_index(index, stream):
        """Write a single index in CSV format to a stream"""
        writer = csv.Writer(stream)
        # One output row per record, per index key it is filed under.
        for indexkey, rows in index.iteritems():
            for row in rows:
                writer.writerow([unicode(indexkey)]
                                + [unicode(v) for v in row])
    for indexname, index in indices.iteritems():
        # NOTE(review): binary mode — presumably dedupe.csv's Writer encodes
        # to bytes itself (Python 2 csv convention); confirm in dedupe.csv.
        with open(join(outdir, prefix + indexname + '.csv'), 'wb') as stream:
            write_index(index, stream)
def write_comparisons(ostream, comparator, comparisons, scores, indices1,
                      indices2=None, projection=None, origstream=None):
    """Write pairs of compared records, together with index keys and
    field comparison weights. Inspection shows which index keys matched,
    and the field-by-field similarity.

    For each scored pair, three rows go to *ostream*: the first record's
    index keys and compared field values, the second record's, then a row
    with the classifier score, per-index key-overlap booleans and the
    similarity weights.

    :type ostream: binary writer
    :param ostream: where to write CSV for similarity vectors.
    :type comparator: :class:`~sim.Record`
    :param comparator: dict of named :class:`~sim.Field` field comparators
    :type comparisons: {(`R`, `R`):[:class:`float`, ...], ...}
    :param comparisons: Similarity vectors from pairs of record comparisons.
    :type scores: {(`R`, `R`)::class:`float`, ...} or :keyword:`None`
    :param scores: classifier scores to show for pairs of records.
    :type indices1: :class:`~indexer.Indices`
    :param indices1: index of records being linked
    :type indices2: :class:`~indexer.Indices`
    :param indices2: optional index of right-hand records for master-linkage.
    :type projection: :class:`Projection`
    :param projection: Converts each record into output form.
    :type origstream: binary writer
    :param origstream: write CSV for pairs of compared original records.
    """
    if not comparisons:
        return  # in case no comparisons were done
    # File for comparison statistics
    writer = csv.Writer(ostream)
    writer.writerow(["Score"] + indices1.keys() + comparator.keys())
    # Self-linkage: compare the input index against itself.
    indices2 = indices2 if indices2 else indices1
    # File for original records
    record_writer = None
    # Obtain field-getter for each value comparator
    field1 = [vcomp.field1 for vcomp in comparator.itervalues()]
    field2 = [vcomp.field2 for vcomp in comparator.itervalues()]
    # Use dummy classifier scores if None were provided
    if scores is None:
        scores = dict((k, 0) for k in comparisons.iterkeys())
    # write the similarity vectors
    for (rec1, rec2), score in scores.iteritems():
        weights = comparisons[(rec1, rec2)]  # look up comparison vector
        keys1 = [idx.makekey(rec1) for idx in indices1.itervalues()]
        keys2 = [idx.makekey(rec2) for idx in indices2.itervalues()]
        # One row per record: blank score column, then its index keys
        # (";"-joined) and its compared field values.
        writer.writerow([u""] +
                        [u";".join(unicode(k) for k in kl) for kl in keys1] +
                        [unicode(f(rec1)) for f in field1])
        writer.writerow([u""] +
                        [u";".join(unicode(k) for k in kl) for kl in keys2] +
                        [unicode(f(rec2)) for f in field2])
        # Tuple of booleans indicating whether index keys are equal
        idxmatch = [bool(set(k1).intersection(set(k2))) if
                    (k1 is not None and k2 is not None) else ""
                    for k1, k2 in zip(keys1, keys2)]
        weightrow = [score] + idxmatch + list(weights)
        writer.writerow(str(x) for x in weightrow)
    if origstream is not None:
        # Optionally dump the original record pairs side by side.
        record_writer = csv.Writer(origstream)
        if projection:
            record_writer.writerow(projection.fields)
        else:
            projection = lambda x: x  # no transformation
        for (rec1, rec2), score in scores.iteritems():
            record_writer.writerow(projection(rec1))
            record_writer.writerow(projection(rec2))
def filelog(path):
    """Attach a :class:`logging.FileHandler` writing to `path` to the
    root logger, using the project's standard log-line format."""
    handler = logging.FileHandler(path)
    formatter = logging.Formatter(
        '%(asctime)s log_level=%(levelname)s product=%(name)s %(message)s',
        '%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    root = logging.getLogger()
    root.addHandler(handler)
def writecsv(path, rows, header=None):
    """Write the `header` (when truthy) followed by `rows` as CSV to `path`."""
    with open(path, 'wb') as stream:
        csvout = csv.Writer(stream)
        if header:
            csvout.writerow(header)
        csvout.writerows(rows)
def loadcsv(path):
    """Load records from csv at `path` as a list of :class:`namedtuple`"""
    with open(path, 'rb') as stream:
        reader = csv.Reader(stream)
        return list(reader)
class LinkCSV(object):
    """Link the input records, either to themselves or to the master records
    if provided.

    The linkage is performed in the constructor, which may therefore take a
    long time to return. The `write_*` methods tell the instance which results
    to write to CSV files. The strategy for linkage is made up of the
    `indeces`, `comparator` and `classifier` parameters. Progress
    messages and estimates are written to the root logger, for which
    a FileHandler directs output to the output directory.

    :type indexstrategy: [ (`str`, `type`, `function`) ]
    :param indexstrategy: List of indexes to use, providing the index name, \
    class for constructing the index, and function for producing the index key.
    :type comparator: :class:`~sim.Record`
    :param comparator: takes a pair of records and returns a similarity vector.
    :type classifier: function({(`R`, `R`):[float]}) [(`R`, `R`)], [(`R`, `R`)]
    :param classifier: separate record comparisons into matching/non-matching.
    :type records: [`R`, ...]
    :param records: input records for linkage analysis
    :type odir: :class:`str` or :keyword:`None`
    :param odir: Directory in which to place output files and log files.
    :type master: [`R`, ...]
    :param master: master records to which `records` should be linked.
    :type logname: :class:`str` or :keyword:`None`
    :param logname: Name of log file to write to in output directory.
    :type indeces1, indeces2: :class:`~sim.Indeces`
    :ivar indeces1, indeces2: Indexed input and master records.
    :type matches, nonmatches: {(`R`, `R`)::class:`float`}
    :ivar matches, nonmatches: classifier scores of matched/nonmatched pairs.
    """

    def __init__(self, outdir, indexstrategy, comparator, classifier, records,
                 master=None, logname='linkage.log'):
        """
        :rtype: {(R, R):float}, {(R, ):float}
        :return: classifier scores for match pairs and non-match pairs
        """
        self.comparator = comparator
        self.indexstrategy = indexstrategy
        self.classifier = classifier
        self.records1 = records
        # Master records are optional; an empty list selects self-linkage.
        self.records2 = master if master else []
        self.outdir = outdir
        if self.outdir is not None and logname is not None:
            filelog(self.opath(logname))
        # Index the records and print the stats
        self.indices1 = sim.Indices(self.indexstrategy, self.records1)
        self.indices2 = None
        if self.records2:
            self.indices2 = sim.Indices(self.indexstrategy, self.records2)
        # Compute the similarity vectors
        self.indices1.log_comparisons(self.indices2)
        self.comparisons = self.indices1.compare(
            self.comparator, self.indices2)
        # Classify the similarity vectors
        self.matches, self.nonmatches = classifier(self.comparisons)

    def opath(self, name):
        """Path for a file `name` in the :attr:`odir`."""
        return os.path.join(self.outdir, name)

    @property
    def fields1(self):
        """Field names on input records (empty when records are not
        namedtuples or there are no records)."""
        try:
            return self.records1[0]._fields
        except (IndexError, AttributeError):
            return []

    @property
    def fields2(self):
        """Field names on master records (empty when records are not
        namedtuples or there are no master records)."""
        try:
            return self.records2[0]._fields
        except (IndexError, AttributeError):
            return []

    @property
    def projection(self):
        """Convert input/master records into output records."""
        if self.fields1:
            return csv.Projection.unionfields(self.fields2, self.fields1)
        else:
            return None

    def write_all(self):
        """Call all of the other `write_*` methods, for full analysis.

        .. warning::
           The total output may be as much as 10x larger than the input file.
        """
        self.write_records()
        self.write_indeces()
        # Input-vs-master splits only make sense when a master set exists.
        if self.records2:
            self.write_input_splits()
        self.write_match_pairs()
        self.write_nonmatch_pairs()
        self.write_groups()

    def write_records(
            self, inputrecs="input-records.csv", masterrecs="input-master.csv"):
        """Write the input and master records CSV files."""
        writecsv(self.opath(inputrecs), self.records1, self.fields1)
        if self.indices2:
            writecsv(self.opath(masterrecs), self.records2, self.fields2)

    def write_indeces(self, inputpre="InputIdx-", masterpre="MasterIdx-"):
        """Write contents of each :class:`~indexer.Index` to files starting
        with these prefixes."""
        write_indices(self.indices1, self.outdir, inputpre)
        if self.indices2:
            write_indices(self.indices2, self.outdir, masterpre)

    def write_input_splits(
            self, matches='input-matchrows.csv', singles='input-singlerows.csv'):
        """Write input records that matched and did not match master (requires
        that `master` was specified)."""
        # First element of each match pair is the input-side record.
        matchset = set(a for a, b in self.matches)
        matchrows = [r for r in self.records1 if r in matchset]
        singlerows = [r for r in self.records1 if r not in matchset]
        writecsv(self.opath(matches), matchrows, self.fields1)
        writecsv(self.opath(singles), singlerows, self.fields1)

    def write_match_pairs(
            self, comps="match-comparisons.csv", pairs="match-pairs.csv"):
        """For matched pairs, write the record comparisons and original record
        pairs."""
        _ = self
        # NOTE(review): contextlib.nested exists only in Python 2; this module
        # is Python 2 code throughout (iteritems/unicode).
        with ctx.nested(open(_.opath(comps), 'wb'),
                        open(_.opath(pairs), 'wb')) as (o_comps, o_pairs):
            write_comparisons(o_comps, _.comparator, _.comparisons, _.matches,
                              _.indices1, _.indices2, self.projection, o_pairs)

    def write_nonmatch_pairs(
            self, comps="nonmatch-comparisons.csv", pairs="nonmatch-pairs.csv"):
        """For non-matched pairs, write the record comparisons and original
        record pairs."""
        _ = self
        with ctx.nested(open(_.opath(comps), 'wb'),
                        open(_.opath(pairs), 'wb')) as (o_comps, o_pairs):
            write_comparisons(
                o_comps, _.comparator, _.comparisons, _.nonmatches,
                _.indices1, _.indices2, self.projection, o_pairs)

    def write_groups(self, groups="groups.csv"):
        """Write out all records, with numbered groups of mutually linked
        records first."""
        with open(self.opath(groups), 'wb') as ofile:
            group.write_csv(
                self.matches, self.records1 + self.records2,
                ofile, self.projection)
|
psycofdj/xtdpy | refs/heads/master | xtd/core/logger/tools.py | 2 | # -*- coding: utf-8
#------------------------------------------------------------------#
__author__ = "Xavier MARCELET <xavier@marcelet.com>"
#------------------------------------------------------------------#
import logging
#------------------------------------------------------------------#
def get(p_module = None):
    """Return the logger for `p_module`; ``"root"`` or ``None`` selects
    the root logger."""
    if p_module is None or p_module == "root":
        return logging.getLogger()
    return logging.getLogger(p_module)
def __wrap(p_func, p_module, p_msg, *p_args, **p_kwds):
    """Look up the logging method named `p_func` on the logger for
    `p_module` and invoke it with the given message and arguments."""
    l_method = getattr(logging.getLogger(p_module), p_func)
    l_method(p_msg, *p_args, **p_kwds)
# Thin convenience wrappers, one per standard logging level.  Each forwards
# its message and arguments to the logger named p_module via __wrap().
def debug(p_module, p_msg, *p_args, **p_kwds):
    __wrap("debug", p_module, p_msg, *p_args, **p_kwds)

def info(p_module, p_msg, *p_args, **p_kwds):
    __wrap("info", p_module, p_msg, *p_args, **p_kwds)

def warning(p_module, p_msg, *p_args, **p_kwds):
    __wrap("warning", p_module, p_msg, *p_args, **p_kwds)

def error(p_module, p_msg, *p_args, **p_kwds):
    __wrap("error", p_module, p_msg, *p_args, **p_kwds)

def critical(p_module, p_msg, *p_args, **p_kwds):
    __wrap("critical", p_module, p_msg, *p_args, **p_kwds)

def exception(p_module, p_msg, *p_args, **p_kwds):
    # Like error(), but logging records the active exception traceback.
    __wrap("exception", p_module, p_msg, *p_args, **p_kwds)

def log(p_level, p_module, p_msg, *p_args, **p_kwds):
    # Level passed explicitly as the logging-method name (e.g. "info").
    __wrap(p_level, p_module, p_msg, *p_args, **p_kwds)
#------------------------------------------------------------------#
|
coderbone/SickRage | refs/heads/master | sickbeard/providers/hdspace.py | 3 | # Author: Idan Gutman
# Modified by jkaberg, https://github.com/jkaberg for SceneAccess
# Modified by 7ca for HDSpace
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
import urllib
import requests
from bs4 import BeautifulSoup
import logging
from sickbeard import tvcache
from sickbeard.providers import generic
class HDSpaceProvider(generic.TorrentProvider):
    """Torrent provider for the HD-Space private tracker.

    Scrapes the site's search listing (and, via :class:`HDSpaceCache`, its
    RSS feed) for title/url/size/seeders/leechers result tuples.
    """

    def __init__(self):
        generic.TorrentProvider.__init__(self, "HDSpace")
        self.supportsBacklog = True
        # Populated later by SickRage's settings layer.
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None
        self.cache = HDSpaceCache(self)
        self.urls = {'base_url': 'https://hd-space.org/',
                     'login': 'https://hd-space.org/index.php?page=login',
                     'search': 'https://hd-space.org/index.php?page=torrents&search=%s&active=1&options=0',
                     'rss': 'https://hd-space.org/rss_torrents.php?feed=dl'}
        self.categories = [15, 21, 22, 24, 25, 40]  # HDTV/DOC 1080/720, bluray, remux
        # Append the category filter to both the search and RSS URLs.
        # (b'...' and u'...' keys are interchangeable in a Python 2 dict.)
        self.urls[b'search'] += '&category='
        for cat in self.categories:
            self.urls[b'search'] += str(cat) + '%%3B'
            self.urls[b'rss'] += '&cat[]=' + str(cat)
        self.urls[b'search'] = self.urls[b'search'][:-4]  # remove extra %%3B
        self.url = self.urls[b'base_url']

    def _checkAuth(self):
        """Warn when credentials are missing.

        Always returns True so the caller proceeds regardless (original
        behaviour, deliberately preserved).
        """
        if not self.username or not self.password:
            logging.warning("Invalid username or password. Check your settings")
        return True

    def _doLogin(self):
        """Log in to the tracker unless a session cookie already exists.

        :return: True on apparent success, False when the site is
            unreachable or rejects the credentials.
        """
        if 'pass' in requests.utils.dict_from_cookiejar(self.session.cookies):
            # Session already authenticated.
            return True
        login_params = {'uid': self.username,
                        'pwd': self.password}
        response = self.getURL(self.urls[b'login'], post_data=login_params, timeout=30)
        if not response:
            logging.warning("Unable to connect to provider")
            return False
        if re.search('Password Incorrect', response):
            logging.warning("Invalid username or password. Check your settings")
            return False
        return True

    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
        """Run each search string against the site and return result tuples
        (title, download_url, size, seeders, leechers), sorted by seeders.

        Bug fixes vs. the original:
        - string comparisons use ``!=`` instead of ``is not`` (the identity
          test only worked through CPython string interning);
        - a missing ``<div id="information">`` marker raised an uncaught
          IndexError from ``split(...)[1]``; it is now handled like a
          missing ``<table``.
        """
        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}
        if not self._doLogin():
            return results
        for mode in search_strings.keys():
            logging.debug("Search Mode: %s" % mode)
            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    searchURL = self.urls[b'search'] % (urllib.quote_plus(search_string.replace('.', ' ')),)
                else:
                    searchURL = self.urls[b'search'] % ''
                logging.debug("Search URL: %s" % searchURL)
                if mode != 'RSS':
                    logging.debug("Search string: %s" % search_string)
                data = self.getURL(searchURL)
                if not data or 'please try later' in data:
                    logging.debug("No data returned from provider")
                    continue
                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    data = data.split('<div id="information"></div>')[1]
                    index = data.index('<table')
                except (IndexError, ValueError):
                    logging.error("Could not find main torrent table")
                    continue
                html = BeautifulSoup(data[index:], 'html5lib')
                if not html:
                    logging.debug("No html data parsed from provider")
                    continue
                torrents = html.findAll('tr')
                if not torrents:
                    continue
                # Skip column headers
                for result in torrents[1:]:
                    if len(result.contents) < 10:
                        # skip extraneous rows at the end
                        continue
                    try:
                        dl_href = result.find('a', attrs={'href': re.compile(r'download.php.*')})['href']
                        title = re.search('f=(.*).torrent', dl_href).group(1).replace('+', '.')
                        download_url = self.urls[b'base_url'] + dl_href
                        seeders = int(result.find('span', attrs={'class': 'seedy'}).find('a').text)
                        leechers = int(result.find('span', attrs={'class': 'leechy'}).find('a').text)
                        size = re.match(r'.*?([0-9]+,?\.?[0-9]* [KkMmGg]+[Bb]+).*', str(result), re.DOTALL).group(1)
                        if not all([title, download_url]):
                            continue
                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logging.debug(
                                    "Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(
                                        title, seeders, leechers))
                            continue
                        item = title, download_url, size, seeders, leechers
                        if mode != 'RSS':
                            logging.debug("Found result: %s " % title)
                        items[mode].append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        # Row did not match the expected layout; skip it.
                        continue
            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)
            results += items[mode]
        return results

    def seedRatio(self):
        """Seed ratio configured for this provider (may be None)."""
        return self.ratio

    def _convertSize(self, size):
        """Convert a human-readable size string such as "1.5 GB" to bytes.

        Bug fix: the original used substring tests (``if modifier in 'KB'``)
        which mis-classified a bare "B" as kilobytes and missed lowercase
        units (e.g. "kb") that the capture regex in _doSearch allows.
        """
        size, modifier = size.split(' ')
        size = float(size)
        modifier = modifier.upper()
        if modifier == 'KB':
            size = size * 1024
        elif modifier == 'MB':
            size = size * 1024 ** 2
        elif modifier == 'GB':
            size = size * 1024 ** 3
        elif modifier == 'TB':
            size = size * 1024 ** 4
        return int(size)
class HDSpaceCache(tvcache.TVCache):
    """RSS cache for the HDSpace provider."""

    def __init__(self, provider_obj):
        tvcache.TVCache.__init__(self, provider_obj)
        # only poll HDSpace every 10 minutes max
        self.minTime = 10

    def _getRSSData(self):
        # An empty 'RSS' search string makes _doSearch fetch the generic feed.
        search_strings = {'RSS': ['']}
        return {'entries': self.provider._doSearch(search_strings)}
provider = HDSpaceProvider()
|
dichenko/kpk2016 | refs/heads/master | OOP/02_Vector2d.py | 1 | class Vector2D():
    def __init__(self, x=0, y=0):
        """Create a 2-D vector with coordinates (x, y); defaults to origin."""
        self._x = x
        self._y = y
    def __add__(self, other):
        """Vector addition.

        Overrides the standard "+" operator so that adding two vectors
        adds their coordinates component-wise.

        :return: a new Vector2D with the summed coordinates.
        """
        x = self._x + other._x
        y = self._y + other._y
        return Vector2D(x, y)
    def __str__(self):
        """Human-readable form used by print(): "(x, y)".

        NOTE(review): "%d" truncates non-integer coordinates — confirm
        vectors are only used with ints.
        """
        return '(%d, %d)'%(self._x, self._y)
def __mul__(self,other):
"""Переопределяем умножение для класса"""
x = self._x * other._x
y = self._y * other._y
return Vector2D(x+y)
# Small demo run at import time: build two vectors and print the result of
# the overloaded "*" operator.
a = Vector2D(2,2)
b = Vector2D(-1,3)
c = a * b
print(a, '*', b, '=',c)
|
Rademade/taiga-back | refs/heads/master | tests/integration/test_attachments.py | 3 | import pytest
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_create_user_story_attachment_without_file(client):
    """Regression test: attachments must not be created when no
    attached_file is supplied (expects HTTP 400)."""
    story = f.UserStoryFactory.create()
    f.MembershipFactory(project=story.project, user=story.owner, is_admin=True)
    payload = {
        "description": "test",
        "attached_file": None,
        "project": story.project_id,
    }
    client.login(story.owner)
    response = client.post(reverse('userstory-attachments-list'), payload)
    assert response.status_code == 400
def test_create_attachment_on_wrong_project(client):
    """An attachment whose object_id belongs to a different project than
    the one named in the payload must be rejected (HTTP 400)."""
    issue1 = f.create_issue()
    issue2 = f.create_issue(owner=issue1.owner)
    f.MembershipFactory(project=issue1.project, user=issue1.owner, is_admin=True)
    # Sanity: both issues share an owner but live in different projects.
    assert issue1.owner == issue2.owner
    assert issue1.project.owner == issue2.project.owner
    payload = {
        "description": "test",
        "object_id": issue2.pk,
        "project": issue1.project.id,
        "attached_file": SimpleUploadedFile("test.txt", b"test"),
    }
    client.login(issue1.owner)
    response = client.post(reverse("issue-attachments-list"), payload)
    assert response.status_code == 400
def test_create_attachment_with_long_file_name(client):
    """Uploaded file names longer than 100 characters are truncated."""
    issue1 = f.create_issue()
    f.MembershipFactory(project=issue1.project, user=issue1.owner, is_admin=True)
    long_name = 500 * "x" + ".txt"
    payload = {
        "description": "test",
        "object_id": issue1.pk,
        "project": issue1.project.id,
        "attached_file": SimpleUploadedFile(long_name, b"test"),
    }
    client.login(issue1.owner)
    response = client.post(reverse("issue-attachments-list"), payload)
    assert response.data["attached_file"].endswith("/" + 100 * "x" + ".txt")
|
proycon/anavec | refs/heads/master | anavec/icdar_process.py | 1 | #!/usr/bin/env python3
#-------------------------------------------------------------------
# Processing script for ICDAR 2017 Post-OCR Text Correction challenge
# Uses anavec
#-------------------------------------------------------------------
# by Maarten van Gompel
# Radboud University Nijmegen
# Licensed under GPLv3
import os
import argparse
import sys
import glob
import json
from anavec.anavec import Corrector, setup_argparser, InputTokenState, readinput
def loadtext(testfile):
    """Return the OCR text from `testfile`.

    The file must start with a "[OCR_toInput]" line; everything after the
    tag and one separator character on that line is returned.
    """
    tag = "[OCR_toInput]"
    with open(testfile, 'r', encoding='utf-8') as stream:
        for line in stream:
            if not line.startswith(tag):
                raise Exception("Unexpected input format, expected [OCR_toInput] on first line")
            return line[len(tag) + 1:]
    raise Exception("No text found")
def readpositiondata(positionfile):
    """Parse and return the JSON position data stored in `positionfile`."""
    with open(positionfile, 'r', encoding='utf-8') as stream:
        return json.load(stream)
def setbreakpoints(testtokens, mask, blocksize, eager=False):
    """input is all on one line, this will overwhelm the decoder, split into
    'lines' at points where punctuation likely indicates a sentence ending.

    Mutates `mask` in place: sets InputTokenState.EOL on likely sentence-final
    periods (at least 6 tokens since the previous break and the next token
    starts with an uppercase letter), and InputTokenState.EOB every
    `blocksize` tokens.  With `eager`, tokens merely ending in '.' also
    qualify when the following token is marked CORRECT.
    """
    begin = 0
    blockbegin = 0
    for i, (testtoken, state) in enumerate(zip(testtokens, mask)):
        if testtoken == '.' or (eager and testtoken[-1] == '.' and i+1 < len(testtokens) and mask[i+1] & InputTokenState.CORRECT):
            # Require a capitalised alphabetic next token before breaking.
            if i - begin >= 6 and i+1 < len(testtokens) and testtokens[i+1][0].isalpha() and testtokens[i+1][0] == testtokens[i+1][0].upper():
                mask[i] |= InputTokenState.EOL
                begin = i
        # Independent of punctuation: force a block boundary every blocksize
        # tokens so the decoder's working set stays bounded.
        if i - blockbegin >= blocksize:
            mask[i] |= InputTokenState.EOB
            blockbegin = i
def process(corrector, testfiles, args):
    """Run the corrector over every file in `testfiles` and collect the
    corrections in the ICDAR challenge's result format:
    {testfile: {"begin:length": {correction: score}}}.

    With `args.positionfile` set (task 2), only tokens listed in the
    position data are corrected; reference positions spanning several
    tokens are merged into one token before correction.
    """
    icdar_results = {} #results as per challenge specification
    if args.positionfile:
        positiondata = readpositiondata(args.positionfile)
    elif args.task == 2:
        raise Exception("No position file specified, required for task 2!")
    for testfile in testfiles:
        print(" === PROCESSING " + testfile + " === ", file=sys.stderr)
        icdar_results[testfile] = {} #results as per challenge specification
        text = loadtext(testfile)
        lines = text.strip('\n').split('\n') #should actually only split into one item for this task
        if len(lines) != 1:
            raise Exception("Input file " + testfile + " contains more lines! Invalid according to specification!")
        testtokens, mask, positions = readinput(lines, False, args.blocksize)
        #input is all on one line, this will overwhelm the decoder, split into 'lines' at points where punctuation likely indicates a sentence ending
        setbreakpoints(testtokens, mask, args.blocksize, eager=True)
        if args.positionfile:
            if testfile not in positiondata:
                print("WARNING: Testfile " + testfile + " is not found in the position data! Skipping!", file=sys.stderr)
                continue
            elif not positiondata[testfile]:
                #found but empty (happens it seems, just move on to the next testfile
                print("WARNING: Testfile " + testfile + " exists but has no positions the position data! Skipping!", file=sys.stderr)
                continue
            # Map begin-character offset -> token span length.
            refpositions = { int(positiontuple.split(':')[0]): int(positiontuple.split(':')[1]) for positiontuple in positiondata[testfile] }
        else:
            refpositions = {}
        foundpositions = {} #bookkeeping for task 2
        assert len(testtokens) == len(mask)
        assert len(mask) == len(positions)
        if args.positionfile:
            #prepare merged data structures, we will copy everything into this and merge tokens that are to be treated as one according to the reference positions
            testtokens_merged = []
            mask_merged = []
        mergelength = 0
        positions_merged = [] #this one we use always (converts 3-tuple to 4-tuples by adding tokenlength explicitly, even if it's just 1 for all )
        for i, (token, state, (beginchar, endchar, punctail)) in enumerate(zip(testtokens, mask, positions)):
            if beginchar is None:
                #token is trailing punctuation
                print(" input token #" + str(i) + " (trailing punctuation) -> " + punctail, file=sys.stderr)
                if args.positionfile and mergelength == 0:
                    testtokens_merged.append(token)
                    mask_merged.append(state)
                if mergelength == 0: positions_merged.append( (beginchar, 1, endchar, punctail) )
            else:
                if beginchar in refpositions:
                    # Token starts at a referenced position: force correction
                    # and, for multi-token references, merge the span.
                    print(" REFERENCED token #" + str(i) + " (l=" + str(refpositions[beginchar]) + ") " + testfile + "@" + str(beginchar) + ":1 " + text[beginchar:endchar] + " -> " + token, end="", file=sys.stderr)
                    if args.positionfile:
                        mask[i] |= InputTokenState.INCORRECT #force correction
                        state = mask[i]
                        foundpositions[beginchar] = True
                        if refpositions[beginchar] > 1:
                            token += punctail #consume trailing punctuation as part of token, it's no longer trailing
                            mergelength = refpositions[beginchar]-1
                            print("\t[MERGING WITH NEXT " + str(mergelength)+" AS ", end="", file=sys.stderr)
                            # origoffset counts real tokens consumed; offset
                            # also steps over trailing-punctuation entries.
                            origoffset = 1
                            offset = 1
                            while origoffset <= mergelength:
                                beginchar2, endchar2, punctail2 = positions[i+offset]
                                if beginchar2 is None:
                                    #trailing punctuation
                                    token += testtokens[i+offset]
                                else:
                                    token += " " + testtokens[i+offset]
                                    if origoffset == mergelength:
                                        punctail = punctail2
                                        endchar = endchar2
                                    else:
                                        token += punctail2
                                    origoffset += 1
                                offset += 1
                            print(token + "]", end="", file=sys.stderr)
                        testtokens_merged.append(token)
                        state = InputTokenState.CORRECTABLE
                        mask_merged.append(state)
                        positions_merged.append( (beginchar, refpositions[beginchar], endchar, punctail) )
                        print("\t[MERGE SPANS CHARS " + str(beginchar)+" TO " + str(endchar), end="", file=sys.stderr)
                else:
                    print(" input token #" + str(i) + " " + testfile + "@" + str(beginchar) + ":1 " + text[beginchar:endchar] + " -> " + token, end="", file=sys.stderr)
                    if args.positionfile:
                        if mergelength > 0:
                            # Token already consumed by a preceding merge.
                            mergelength -= 1
                            print("\t[MERGED]", file=sys.stderr)
                            continue
                        #mark this token as correct (it's not in the positions file)
                        mask[i] |= InputTokenState.CORRECT #force correction
                        state = mask[i]
                        testtokens_merged.append(token)
                        mask_merged.append(state)
                    positions_merged.append( (beginchar, 1, endchar, punctail) )
            # Trace the per-token state flags to stderr.
            if punctail:
                print("\t[punctail=" + punctail + "]", end="", file=sys.stderr)
            if state & InputTokenState.CORRECT:
                print("\t[KEEP]", end="", file=sys.stderr)
            elif state & InputTokenState.INCORRECT:
                print("\t[FORCE-PROCESS]", end="", file=sys.stderr)
            print(file=sys.stderr)
            if state & InputTokenState.EOL:
                print(" Tokenisation --eol--", file=sys.stderr)
            if state & InputTokenState.EOB:
                print(" Tokenisation --end of block--", file=sys.stderr)
        for beginchar in sorted(refpositions):
            if beginchar not in foundpositions:
                print("WARNING: Position @" + str(beginchar) + ":" + str(refpositions[beginchar]) + " was not found in " + testfile, file=sys.stderr)
        if args.task not in (1,2): continue
        if args.positionfile: #task 2
            testtokens = testtokens_merged
            mask = mask_merged
        # The 4-tuple positions are used for both tasks from here on.
        positions = positions_merged
        foundpositions = {}
        assert len(testtokens) == len(mask)
        assert len(mask) == len(positions)
        for results in corrector.correct(testtokens, mask):
            print("Corrector input: ", " ".join(results.testtokens), file=sys.stderr)
            print("Corrector best output: ", str(results.top[0]), file=sys.stderr)
            if args.options == 0:
                # Take the single best decoder hypothesis per token.
                print("(Primary source is decoder output)", file=sys.stderr)
                for candidate in results.top[0]:
                    index = results.offset + candidate.hypothesis.index
                    beginchar, origtokenlength, endchar, punctail = positions[index]
                    if beginchar is None:
                        #ignore trailing punctuation
                        continue
                    if candidate.error or (args.positionfile and beginchar in refpositions):
                        tokenlength = candidate.hypothesis.length #in tokens
                        correction = candidate.text
                        if args.positionfile:
                            if beginchar not in refpositions or refpositions[beginchar] != origtokenlength:
                                #task 2: not a reference token, ignore
                                continue
                            else:
                                foundpositions[beginchar] = True
                        #re-add any trailing punctuation
                        correction += punctail
                        original = text[beginchar:endchar]
                        print(" Correction [" + testfile + "@" + str(beginchar) + ":" + str(origtokenlength) + "] " + original + " -> " + correction, file=sys.stderr)
                        icdar_results[testfile][str(beginchar)+":"+str(origtokenlength)] = { correction: candidate.score }
            else:
                # Output up to args.options candidates per token from the
                # candidate tree instead of the decoded best path.
                print("(Primary source is candidate tree)", file=sys.stderr)
                for index in sorted(results.candidatetree):
                    globalindex = results.offset + index
                    beginchar, origtokenlength, endchar, punctail = positions[globalindex]
                    if beginchar is None:
                        #ignore trailing punctuation
                        continue
                    if args.positionfile:
                        if beginchar not in refpositions:
                            continue
                        foundpositions[beginchar] = True
                    if 1 in results.candidatetree[index]:
                        candidates = list(sorted(results.candidatetree[index][1], key=lambda x: (x.lmselect * -1, x.logprob * -1)))[:args.options]
                        if candidates:
                            #scoresum = sum( (candidate.score for candidate in candidates ) )
                            original = text[beginchar:endchar]
                            print(" Correction [" + testfile + "@" + str(beginchar) + ":" + str(origtokenlength) + "] " + original + " -> " + "; ".join([ candidate.text + " (" + str(10**candidate.logprob) + ") " for candidate in candidates]) + " [punctail=" + punctail+"]", file=sys.stderr)
                            icdar_results[testfile][str(beginchar)+":"+str(origtokenlength)] = { candidate.text + punctail: 10**candidate.logprob for candidate in candidates }
        for beginchar in sorted(refpositions):
            if beginchar not in foundpositions:
                print("WARNING: Position @" + str(beginchar) + ":" + str(refpositions[beginchar]) + " was not corrected in " + testfile, file=sys.stderr)
    return icdar_results
def main():
    """Command-line entry point.

    Parses the arguments, builds the anavec Corrector (unless --task 0 is
    given, which runs the token/position analysis only), processes every
    input file and prints the ICDAR result dictionary as JSON on stdout.
    """
    parser = argparse.ArgumentParser(description="ICDAR 2017 Post-OCR Correction Processing Script for Task 2 with Anavec", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input', type=str, help="Input file or directory (*.txt files)", action='store', required=True)
    parser.add_argument('--positionfile', type=str, help="Input file with position information (erroneous_tokens_pos.json), required for task 2", action='store', required=False)
    parser.add_argument('--options', type=int, help="Maximum number of options to output, if set to 0 (default), the best option according to the decoder will outputted. For values higher than 0, the candidate tree will be explicitly consulted instead, which limits the use of ngrams", action='store', default=0)
    parser.add_argument('--task', type=int, help="Task", action='store', required=True)
    setup_argparser(parser)  # register anavec's own options on the same parser
    args = parser.parse_args()
    args.lmwin = True
    # Accept either a single file or a directory of *.txt files.
    if os.path.isdir(args.input):
        testfiles = glob.glob(args.input + "/*.txt")
    else:
        testfiles = [args.input]
    print("Found testfiles:", testfiles, file=sys.stderr)
    if args.task > 0:
        corrector = Corrector(**vars(args))
    else:
        corrector = None
    results = process(corrector, testfiles, args)
    # Output results as JSON to stdout
    print(json.dumps(results, ensure_ascii=False, indent=4))
if __name__ == '__main__':
main()
|
savoirfairelinux/django | refs/heads/master | django/contrib/postgres/forms/ranges.py | 43 | from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import gettext_lazy as _
__all__ = [
'BaseRangeField', 'IntegerRangeField', 'FloatRangeField',
'DateTimeRangeField', 'DateRangeField', 'RangeWidget',
]
class BaseRangeField(forms.MultiValueField):
    """
    Base form field for PostgreSQL range types.

    Subclasses must provide ``base_field`` (the form field class used for
    each bound) and ``range_type`` (the psycopg2 range class produced by
    ``compress()``).
    """
    default_error_messages = {
        'invalid': _('Enter two valid values.'),
        'bound_ordering': _('The start of the range must not exceed the end of the range.'),
    }

    def __init__(self, **kwargs):
        # Provide a two-part widget and a lower/upper field pair unless the
        # caller supplied their own.
        if 'widget' not in kwargs:
            kwargs['widget'] = RangeWidget(self.base_field.widget)
        if 'fields' not in kwargs:
            kwargs['fields'] = [
                self.base_field(required=False),
                self.base_field(required=False),
            ]
        kwargs.setdefault('required', False)
        kwargs.setdefault('require_all_fields', False)
        super().__init__(**kwargs)

    def prepare_value(self, value):
        """Split a range object (or None) into a [lower, upper] pair for display."""
        lower_field, upper_field = self.fields
        if isinstance(value, self.range_type):
            bounds = (value.lower, value.upper)
        elif value is None:
            bounds = (None, None)
        else:
            # Already a two-element list (e.g. re-rendering submitted data).
            return value
        return [
            lower_field.prepare_value(bounds[0]),
            upper_field.prepare_value(bounds[1]),
        ]

    def compress(self, values):
        """Combine the cleaned [lower, upper] pair into a range instance.

        Raises ValidationError with code 'bound_ordering' when both bounds
        are given and out of order, or 'invalid' when the range type
        rejects the values.
        """
        if not values:
            return None
        lower, upper = values
        both_bounded = lower is not None and upper is not None
        if both_bounded and lower > upper:
            raise exceptions.ValidationError(
                self.error_messages['bound_ordering'],
                code='bound_ordering',
            )
        try:
            range_value = self.range_type(lower, upper)
        except TypeError:
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
            )
        return range_value
class IntegerRangeField(BaseRangeField):
    """Range field whose bounds are validated as whole numbers; compresses to NumericRange."""
    default_error_messages = {'invalid': _('Enter two whole numbers.')}
    base_field = forms.IntegerField
    range_type = NumericRange
class FloatRangeField(BaseRangeField):
    """Range field whose bounds are validated as numbers; compresses to NumericRange."""
    default_error_messages = {'invalid': _('Enter two numbers.')}
    base_field = forms.FloatField
    range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
    """Range field whose bounds are validated as date/times; compresses to DateTimeTZRange."""
    default_error_messages = {'invalid': _('Enter two valid date/times.')}
    base_field = forms.DateTimeField
    range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
    """Range field whose bounds are validated as dates; compresses to DateRange."""
    default_error_messages = {'invalid': _('Enter two valid dates.')}
    base_field = forms.DateField
    range_type = DateRange
class RangeWidget(MultiWidget):
    """Widget rendering two copies of ``base_widget``, one per range bound."""

    def __init__(self, base_widget, attrs=None):
        super().__init__((base_widget, base_widget), attrs)

    def decompress(self, value):
        # Split a range object into its (lower, upper) bounds for the two
        # sub-widgets; falsy values render as an empty pair.
        return (value.lower, value.upper) if value else (None, None)
|
olivierdalang/stdm | refs/heads/master | ui/helpers/__init__.py | 1 | from .dirtytracker import ControlDirtyTracker,ControlDirtyTrackerCollection, \
ControlReaderMapper
from .valuehandlers import (
CheckBoxValueHandler,
ControlValueHandler,
LineEditValueHandler,
ComboBoxValueHandler,
TextEditValueHandler,
DateEditValueHandler,
SourceDocManagerValueHandler,
ForeignKeyMapperValueHandler,
SpinBoxValueHandler,
DoubleSpinBoxValueHandler,
CoordinatesWidgetValueHandler
)
from .datamanagemixin import SupportsManageMixin
def valueHandler(ctl):
    '''
    Factory that returns the corresponding value handler based on the control type.

    :param ctl: Qt widget whose meta-object class name is used to look up a
        registered handler in ``ControlValueHandler.handlers``.
    :return: The handler registered for the control's class name, or ``None``
        if no handler is registered for that control type.
    '''
    ctl_name = str(ctl.metaObject().className())
    # dict.get already returns None for unknown keys, so the explicit
    # membership test (and its second dictionary lookup) is unnecessary.
    return ControlValueHandler.handlers.get(ctl_name)
|
ZLLab-Mooc/edx-platform | refs/heads/named-release/dogwood.rc | openedx/core/djangoapps/programs/__init__.py | 40 | """
Platform support for Programs.
This package is a thin wrapper around interactions with the Programs service,
supporting learner- and author-facing features involving that service
if and only if the service is deployed in the Open edX installation.
To ensure maximum separation of concerns, and a minimum of interdependencies,
this package should be kept small, thin, and stateless.
"""
|
soasme/wikisensei | refs/heads/master | wikisensei/stream/admin.py | 469 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
|
ArcticCore/arcticcoin | refs/heads/master | qa/rpc-tests/rpcbind_test.py | 69 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
# TODO extend this test from the test framework (like all other tests)
import tempfile
import traceback
from test_framework.util import *
from test_framework.netutil import *
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
    '''
    Start a node with requested rpcallowip and rpcbind parameters,
    then try to connect, and check if the set of bound addresses
    matches the expected set.

    tmpdir     -- root directory for the node's datadir
    allow_ips  -- list of -rpcallowip values, or None to omit the option
    connect_to -- host[:port] the RPC client connects to
    addresses  -- list of -rpcbind values to pass to the node
    expected   -- list of (address, port) tuples the node must be bound to
    '''
    # Normalise the expected addresses to the hex form reported by
    # get_bind_addrs() so the sets are comparable.
    expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
    base_args = ['-disablewallet', '-nolisten']
    if allow_ips:
        base_args += ['-rpcallowip=' + x for x in allow_ips]
    binds = ['-rpcbind='+addr for addr in addresses]
    nodes = start_nodes(1, tmpdir, [base_args + binds], connect_to)
    try:
        pid = bitcoind_processes[0].pid
        # Compare as sets: the order in which the node binds is irrelevant.
        assert_equal(set(get_bind_addrs(pid)), set(expected))
    finally:
        # Always shut the node down, even when the assertion fails.
        stop_nodes(nodes)
        wait_bitcoinds()
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
    '''
    Start a node with the given -rpcallowip list, and request getinfo
    at a non-localhost IP.

    Raises if the connection is refused, so callers can assert both the
    allowed and the denied case.
    '''
    base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
    nodes = start_nodes(1, tmpdir, [base_args])
    try:
        # connect to node through non-loopback interface
        url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
        node = get_rpc_proxy(url, 1)
        node.getinfo()
    finally:
        node = None # make sure connection will be garbage collected and closed
        stop_nodes(nodes)
        wait_bitcoinds()
def run_test(tmpdir):
    """Exercise every -rpcbind / -rpcallowip combination in sequence.

    Each run_bind_test() call starts and stops a fresh node; the cases must
    run in this order because they reuse the same tmpdir.
    """
    assert(sys.platform == 'linux2') # due to OS-specific network stats queries, this test works only on Linux
    # find the first non-loopback interface for testing
    non_loopback_ip = None
    for name,ip in all_interfaces():
        if ip != '127.0.0.1':
            non_loopback_ip = ip
            break
    if non_loopback_ip is None:
        # assert on a truthy string so the message shows up in the failure
        assert(not 'This test requires at least one non-loopback IPv4 interface')
    print("Using interface %s for testing" % non_loopback_ip)
    defaultport = rpc_port(0)
    # check default without rpcallowip (IPv4 and IPv6 localhost)
    run_bind_test(tmpdir, None, '127.0.0.1', [],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check default with rpcallowip (IPv6 any)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
        [('::0', defaultport)])
    # check only IPv4 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
        [('127.0.0.1', defaultport)])
    # check only IPv4 localhost (explicit) with alternative port
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
        [('127.0.0.1', 32171)])
    # check only IPv4 localhost (explicit) with multiple alternative ports on same host
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
        [('127.0.0.1', 32171), ('127.0.0.1', 32172)])
    # check only IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
        [('::1', defaultport)])
    # check both IPv4 and IPv6 localhost (explicit)
    run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
        [('127.0.0.1', defaultport), ('::1', defaultport)])
    # check only non-loopback interface
    run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
        [(non_loopback_ip, defaultport)])
    # Check that with invalid rpcallowip, we are denied
    run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
    try:
        run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
        assert(not 'Connection not denied by rpcallowip as expected')
    except ValueError:
        pass
def main():
    """Parse command-line options, set up a test datadir, run the
    rpcbind/rpcallowip tests, and exit 0 on success or 1 on failure.

    Unless --nocleanup is given, the temporary datadir is removed after
    the run (even on failure).
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                      help="Leave bitcoinds and test.* datadir on exit or error")
    # "%default" (not "%default%") is optparse's interpolation token for the
    # option's default value; the trailing "%" rendered literally.
    parser.add_option("--srcdir", dest="srcdir", default="../../src",
                      help="Source directory containing bitcoind/bitcoin-cli (default: %default)")
    parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
                      help="Root directory for datadirs")
    (options, args) = parser.parse_args()
    # Make the freshly built binaries take precedence on PATH.
    os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
    check_json_precision()
    success = False
    try:
        print("Initializing test directory "+options.tmpdir)
        if not os.path.isdir(options.tmpdir):
            os.makedirs(options.tmpdir)
        initialize_chain(options.tmpdir)
        run_test(options.tmpdir)
        success = True
    except AssertionError as e:
        # str(e), not the deprecated BaseException.message attribute (PEP 352).
        print("Assertion failed: "+str(e))
    except Exception as e:
        print("Unexpected exception caught during testing: "+str(e))
        traceback.print_tb(sys.exc_info()[2])
    if not options.nocleanup:
        print("Cleaning up")
        wait_bitcoinds()
        shutil.rmtree(options.tmpdir)
    if success:
        print("Tests successful")
        sys.exit(0)
    else:
        print("Failed")
        sys.exit(1)
|
nimasmi/wagtail | refs/heads/master | wagtail/images/tests/urls.py | 9 | from django.conf.urls import url
from wagtail.images.views.serve import SendFileView, ServeView
from wagtail.tests import dummy_sendfile_backend
urlpatterns = [
url(r'^actions/serve/(.*)/(\d*)/(.*)/[^/]*', ServeView.as_view(action='serve'), name='wagtailimages_serve_action_serve'),
url(r'^actions/redirect/(.*)/(\d*)/(.*)/[^/]*', ServeView.as_view(action='redirect'), name='wagtailimages_serve_action_redirect'),
url(r'^custom_key/(.*)/(\d*)/(.*)/[^/]*', ServeView.as_view(key='custom'), name='wagtailimages_serve_custom_key'),
url(r'^custom_view/([^/]*)/(\d*)/([^/]*)/[^/]*$', ServeView.as_view(), name='wagtailimages_serve_custom_view'),
url(r'^sendfile/(.*)/(\d*)/(.*)/[^/]*', SendFileView.as_view(), name='wagtailimages_sendfile'),
url(r'^sendfile-dummy/(.*)/(\d*)/(.*)/[^/]*', SendFileView.as_view(backend=dummy_sendfile_backend.sendfile), name='wagtailimages_sendfile_dummy'),
]
|
Nick-OpusVL/odoo | refs/heads/8.0 | addons/l10n_cl/__openerp__.py | 260 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Chile Localization Chart Account',
'version': '1.0',
'description': """
Chilean accounting chart and tax localization.
==============================================
Plan contable chileno e impuestos de acuerdo a disposiciones vigentes
""",
'author': 'Cubic ERP',
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data': [
'account_tax_code.xml',
'l10n_cl_chart.xml',
'account_tax.xml',
'l10n_cl_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
hryamzik/ansible | refs/heads/devel | test/units/modules/network/netscaler/test_netscaler_cs_action.py | 18 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from units.modules.utils import set_module_args
from .netscaler_module import TestModule, nitro_base_patcher
import sys
if sys.version_info[:2] != (2, 6):
import requests
class TestNetscalerCSActionModule(TestModule):
    @classmethod
    def setUpClass(cls):
        # Exception class standing in for the SDK's nitro_exception in tests.
        class MockException(Exception):
            pass
        cls.MockException = MockException
        m = MagicMock()
        cls.cs_action_mock = MagicMock()
        cls.cs_action_mock.__class__ = MagicMock(add=Mock())
        # Fake the Citrix nitro SDK module tree so the module under test can
        # import csaction without the real SDK being installed.
        nssrc_modules_mock = {
            'nssrc.com.citrix.netscaler.nitro.resource.config.cs': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csaction': m,
            'nssrc.com.citrix.netscaler.nitro.resource.config.cs.csaction.csaction': cls.cs_action_mock,
        }
        cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
        cls.nitro_base_patcher = nitro_base_patcher
    @classmethod
    def tearDownClass(cls):
        # Stop both patchers in case a test left them active.
        cls.nitro_base_patcher.stop()
        cls.nitro_specific_patcher.stop()
    def setUp(self):
        super(TestNetscalerCSActionModule, self).setUp()
        # (Re)activate the fake nitro SDK modules before each test.
        self.nitro_base_patcher.start()
        self.nitro_specific_patcher.start()
        # Setup minimal required arguments to pass AnsibleModule argument parsing
    def tearDown(self):
        super(TestNetscalerCSActionModule, self).tearDown()
        # Undo the per-test patching started in setUp().
        self.nitro_base_patcher.stop()
        self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_cs_action
self.module = netscaler_cs_action
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_cs_action.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_cs_action.nitro_exception', MockException):
self.module = netscaler_cs_action
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
class MockException(Exception):
pass
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
nitro_exception=MockException,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
cs_action_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
action_exists=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(return_value=True),
diff_list=Mock(return_value={}),
ConfigProxy=Mock(return_value=cs_action_proxy_mock),
):
self.module = netscaler_cs_action
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
cs_action_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
action_exists=Mock(side_effect=[True, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=Mock(return_value=cs_action_proxy_mock),
):
self.module = netscaler_cs_action
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
cs_action_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
action_exists=Mock(side_effect=[False, True]),
diff_list=Mock(return_value={}),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=Mock(return_value=cs_action_proxy_mock),
):
self.module = netscaler_cs_action
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
cs_action_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
action_exists=Mock(side_effect=[True, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=Mock(return_value=cs_action_proxy_mock),
):
self.module = netscaler_cs_action
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_new_cs_action_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
action_exists=Mock(side_effect=[False, True]),
action_identical=Mock(side_effect=[True]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
self.exited()
cs_action_proxy_mock.assert_has_calls([call.add()])
def test_modified_cs_action_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[True, True]),
action_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
self.exited()
cs_action_proxy_mock.assert_has_calls([call.update()])
def test_absent_cs_action_execution_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[True, False]),
action_identical=Mock(side_effect=[False, True]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
self.exited()
cs_action_proxy_mock.assert_has_calls([call.delete()])
def test_present_cs_action_identical_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[True, True]),
action_identical=Mock(side_effect=[True, True]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
self.exited()
cs_action_proxy_mock.assert_not_called()
def test_absent_cs_action_noop_flow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[False, False]),
action_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
self.exited()
cs_action_proxy_mock.assert_not_called()
def test_present_cs_action_failed_update(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[True, True]),
action_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertEqual(result['msg'], 'Content switching action differs from configured')
self.assertTrue(result['failed'])
def test_present_cs_action_failed_create(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[False, False]),
action_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertEqual(result['msg'], 'Content switching action does not exist')
self.assertTrue(result['failed'])
def test_present_cs_action_update_immutable_attribute(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=['domain']),
action_exists=Mock(side_effect=[True, True]),
action_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertEqual(result['msg'], 'Cannot update immutable attributes [\'domain\']')
self.assertTrue(result['failed'])
def test_absent_cs_action_failed_delete(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_cs_action
client_mock = Mock()
m = Mock(return_value=client_mock)
server_proxy_attrs = {
'diff_object.return_value': {},
}
cs_action_proxy_mock = Mock()
cs_action_proxy_mock.configure_mock(**server_proxy_attrs)
config_proxy_mock = Mock(return_value=cs_action_proxy_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
nitro_exception=self.MockException,
get_nitro_client=m,
diff_list=Mock(return_value={}),
get_immutables_intersection=Mock(return_value=[]),
action_exists=Mock(side_effect=[True, True]),
action_identical=Mock(side_effect=[False, False]),
ensure_feature_is_enabled=Mock(return_value=True),
ConfigProxy=config_proxy_mock,
):
self.module = netscaler_cs_action
result = self.failed()
self.assertEqual(result['msg'], 'Content switching action still exists')
self.assertTrue(result['failed'])
def test_graceful_nitro_exception_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_cs_action
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
action_exists=m,
ensure_feature_is_enabled=Mock(return_value=True),
nitro_exception=MockException
):
self.module = netscaler_cs_action
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
def test_graceful_nitro_exception_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='192.0.2.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_cs_action
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
m = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_cs_action',
action_exists=m,
ensure_feature_is_enabled=Mock(return_value=True),
nitro_exception=MockException
):
self.module = netscaler_cs_action
result = self.failed()
self.assertTrue(
result['msg'].startswith('nitro exception'),
msg='Nitro exception not caught on operation absent'
)
|
MobinRanjbar/hue | refs/heads/master | desktop/core/ext-py/python-ldap-2.3.13/Demo/reconnect.py | 40 | import sys,time,ldap,ldap.ldapobject,ldapurl
from ldap.ldapobject import *
# Parse the LDAP URL given as the first command-line argument and fill in
# defaults for any components the URL omits.
ldap_url = ldapurl.LDAPUrl(sys.argv[1])
ldap_url.applyDefaults({
    'who': '',
    'cred': '',
    'filterstr': '(objectClass=*)',
    'scope': ldap.SCOPE_BASE,
})

# Enable verbose tracing of all LDAP operations.
ldap.trace_level = 1

# ReconnectLDAPObject transparently re-establishes the connection if the
# server goes away between operations.
l = ldap.ldapobject.ReconnectLDAPObject(
    ldap_url.initializeUrl(), trace_level=ldap.trace_level)
l.protocol_version = ldap.VERSION3
l.simple_bind_s(ldap_url.who, ldap_url.cred)

# Repeat the search each time the user presses Enter, demonstrating the
# automatic reconnect behaviour between requests.
while True:
    l.search_s(ldap_url.dn, ldap_url.scope, ldap_url.filterstr, ldap_url.attrs)
    sys.stdin.readline()
|
adityahase/frappe | refs/heads/develop | frappe/patches/v12_0/delete_duplicate_indexes.py | 3 | import frappe
from pymysql import InternalError
# This patch deletes all the duplicate indexes created for same column
# The patch only checks for indexes with UNIQUE constraints
def execute():
	"""Drop duplicate UNIQUE indexes that cover the same column (MariaDB only).

	For each table, keeps the first UNIQUE index seen per column (ordered by
	index name) and drops the rest.

	NOTE(review): duplicates are detected per column_name, so a composite
	UNIQUE index shares rows with single-column indexes and could be dropped
	if another index on one of its columns sorts first — confirm no composite
	unique indexes exist on affected tables.
	"""
	if frappe.db.db_type != 'mariadb':
		return
	all_tables = frappe.db.get_tables()
	final_deletion_map = frappe._dict()
	for table in all_tables:
		indexes_to_keep_map = frappe._dict()
		indexes_to_delete = []
		# Only UNIQUE indexes (non_unique=0); the primary-key-like 'name'
		# column is excluded.
		index_info = frappe.db.sql("""
			SELECT
				column_name,
				index_name,
				non_unique
			FROM information_schema.STATISTICS
			WHERE table_name=%s
			AND column_name!='name'
			AND non_unique=0
			ORDER BY index_name;
		""", table, as_dict=1)
		for index in index_info:
			# First index seen for a column wins; later ones are duplicates.
			if not indexes_to_keep_map.get(index.column_name):
				indexes_to_keep_map[index.column_name] = index
			else:
				indexes_to_delete.append(index.index_name)
		if indexes_to_delete:
			final_deletion_map[table] = indexes_to_delete
	# build drop index query
	for (table_name, index_list) in final_deletion_map.items():
		query_list = []
		alter_query = "ALTER TABLE `{}`".format(table_name)
		for index in index_list:
			query_list.append("{} DROP INDEX `{}`".format(alter_query, index))
		for query in query_list:
			try:
				frappe.db.sql(query)
			except InternalError:
				# Best-effort: an index may already be gone; skip and continue.
				pass
|
bjko/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/applypatch.py | 128 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
_log = logging.getLogger(__name__)
class ApplyPatch(AbstractStep):
    """Checkout step that applies the patch carried in the command state."""

    @classmethod
    def options(cls):
        # Extend the base step's option list with the non-interactive flag.
        extra = [Options.non_interactive]
        return AbstractStep.options() + extra

    def run(self, state):
        patch = state["patch"]
        _log.info("Processing patch %s from bug %s." % (patch.id(), patch.bug_id()))
        self._tool.checkout().apply_patch(patch)
|
awduda/awduda.github.io | refs/heads/master | venv/lib/python2.7/copy_reg.py | 4 | /usr/lib/python2.7/copy_reg.py |
gennad/Django-nonrel-stub-for-Google-App-Engine | refs/heads/master | django/db/models/base.py | 1 | import types
import sys
from itertools import izip
import django.db.models.manager # Imported to register signal handler.
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS
from django.core import validators
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (OneToOneRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute
from django.db.models.deletion import Collector
from django.db.models.options import Options
from django.db import (connections, router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.utils.translation import ugettext_lazy as _
import django.utils.copycompat as copy
from django.utils.functional import curry, update_wrapper
from django.utils.encoding import smart_str, force_unicode
from django.utils.text import get_text_list, capfirst
from django.conf import settings
class ModelBase(type):
    """
    Metaclass for all models.

    Responsible for building the ``_meta`` Options object, wiring up
    inherited fields/managers, creating per-class DoesNotExist /
    MultipleObjectsReturned exceptions, and registering the finished
    class with the app cache.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(ModelBase, cls).__new__
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            # If this isn't a subclass of Model, don't do anything special.
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        if getattr(meta, 'app_label', None) is None:
            # Figure out the app_label by looking one level up.
            # For 'django.contrib.sites.models', this would be 'sites'.
            model_module = sys.modules[new_class.__module__]
            try:
                kwargs = {"app_label": model_module.__name__.split('.')[-2]}
            except IndexError:
                kwargs = {"app_label": 'Model'}
        else:
            kwargs = {}
        new_class.add_to_class('_meta', Options(meta, **kwargs))
        if not abstract:
            # Per-class exceptions subclass the corresponding exceptions of
            # all concrete parents (or the global ones for root models).
            new_class.add_to_class('DoesNotExist', subclass_exception('DoesNotExist',
                    tuple(x.DoesNotExist
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (ObjectDoesNotExist,), module))
            new_class.add_to_class('MultipleObjectsReturned', subclass_exception('MultipleObjectsReturned',
                    tuple(x.MultipleObjectsReturned
                            for x in parents if hasattr(x, '_meta') and not x._meta.abstract)
                                    or (MultipleObjectsReturned,), module))
        if base_meta and not base_meta.abstract:
            # Non-abstract child classes inherit some attributes from their
            # non-abstract parent (unless an ABC comes before it in the
            # method resolution order).
            if not hasattr(meta, 'ordering'):
                new_class._meta.ordering = base_meta.ordering
            if not hasattr(meta, 'get_latest_by'):
                new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Bail out early if we have already created this class.
        m = get_model(new_class._meta.app_label, name, False)
        if m is not None:
            return m
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = new_class._meta.local_fields + \
                     new_class._meta.local_many_to_many + \
                     new_class._meta.virtual_fields
        field_names = set([f.name for f in new_fields])
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [cls for cls in parents if hasattr(cls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            if (new_class._meta.local_fields or
                    new_class._meta.local_many_to_many):
                raise FieldError("Proxy model '%s' contains model fields." % name)
            while base._meta.proxy:
                base = base._meta.proxy_for_model
            new_class._meta.setup_proxy(base)
        # Do the appropriate setup for any model parents.
        o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
                if isinstance(f, OneToOneField)])
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '
                                     'with field of similar name from '
                                     'base class %r' %
                                        (field.name, name, base.__name__))
            if not base._meta.abstract:
                # Concrete classes...
                while base._meta.proxy:
                    # Skip over a proxy class to the "real" base it proxies.
                    base = base._meta.proxy_for_model
                if base in o2o_map:
                    field = o2o_map[base]
                elif not is_proxy:
                    # Auto-create the parent link for multi-table inheritance.
                    attr_name = '%s_ptr' % base._meta.module_name
                    field = OneToOneField(base, name=attr_name,
                            auto_created=True, parent_link=True)
                    new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_class.add_to_class(field.name, copy.deepcopy(field))
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)
            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)
            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)
            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError('Local field %r in class %r clashes '\
                                     'with field of similar name from '\
                                     'abstract base class %r' % \
                                        (field.name, name, base.__name__))
                new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        register_models(new_class._meta.app_label, new_class)
        # Because of the way imports happen (recursively), we may or may not be
        # the first time this model tries to register with the framework. There
        # should only be one class for each model, so we always return the
        # registered version.
        return get_model(new_class._meta.app_label, name, False)
    def copy_managers(cls, base_managers):
        """
        Copy the (creation_counter, name, manager) entries in base_managers
        onto cls, skipping names the class already overrides with a
        different manager.
        """
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        """
        Attach value to cls under name, delegating to the value's
        contribute_to_class hook when it defines one (fields, managers,
        Options all use this protocol).
        """
        if hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(field, model, cls):
                setattr(
                    field.rel.to,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.rel.to,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            add_lazy_relation(
                cls,
                opts.order_with_respect_to,
                opts.order_with_respect_to.rel.to,
                make_foreign_order_accessors
            )
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
        if hasattr(cls, 'get_absolute_url'):
            cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
                                                  cls.get_absolute_url)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """Per-instance bookkeeping for a model object.

    Tracks which database alias the instance belongs to and whether the
    instance is still unsaved.
    """
    def __init__(self, db=None):
        # Alias of the database this instance was loaded from / saved to
        # (None when not yet known).
        self.db = db
        # True while the object is new and not yet persisted.  Uniqueness
        # validation uses this to treat the object as as-yet-unsaved, which
        # matters for models with explicit (non-auto) primary keys.  It
        # affects validation only, never the actual save.
        self.adding = True
class Model(object):
    """Base class from which all Django models derive.

    This django-nonrel variant additionally tracks ``_entity_exists`` and
    ``_original_pk`` so save_base() can decide between insert and update on
    backends that cannot distinguish the two (no SQL UPDATE semantics).
    """
    # Python 2 metaclass hook; ModelBase wires up _meta, managers, fields.
    __metaclass__ = ModelBase
    # True on dynamically-built subclasses that carry deferred (lazily
    # loaded) fields; see __reduce__.
    _deferred = False
    def __init__(self, *args, **kwargs):
        # Nonrel extension: whether a backing datastore entity is known to
        # exist already (consumed by save_base to pick insert vs. update).
        self._entity_exists = kwargs.pop('__entity_exists', False)
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
        # Set up the storage for instance state
        self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")
        fields_iter = iter(self._meta.fields)
        if not kwargs:
            # The ordering of the izip calls matter - izip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in izip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            for val, field in izip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.rel, ManyToOneRel):
                    kwargs.pop(field.attname, None)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.
        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.rel, ManyToOneRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)
        if kwargs:
            # Leftover keywords may still name properties; anything else is
            # an error.
            for prop in kwargs.keys():
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % kwargs.keys()[0])
        # Remember the pk assigned at construction so save_base can tell
        # whether the key was changed afterwards.
        self._original_pk = self.pk if self._meta.pk is not None else None
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
    def __repr__(self):
        try:
            u = unicode(self)
        except (UnicodeEncodeError, UnicodeDecodeError):
            u = '[Bad Unicode data]'
        return smart_str(u'<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        if hasattr(self, '__unicode__'):
            return force_unicode(self).encode('utf-8')
        return '%s object' % self.__class__.__name__
    def __eq__(self, other):
        # Instances are equal when they are of the same class and share a pk.
        return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provide pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        model = self.__class__
        # The obvious thing to do here is to invoke super().__reduce__()
        # for the non-deferred case. Don't do that.
        # On Python 2.4, there is something weird with __reduce__,
        # and as a result, the super call will cause an infinite recursion.
        # See #10547 and #12121.
        defers = []
        pk_val = None
        if self._deferred:
            from django.db.models.query_utils import deferred_class_factory
            factory = deferred_class_factory
            for field in self._meta.fields:
                if isinstance(self.__class__.__dict__.get(field.attname),
                        DeferredAttribute):
                    defers.append(field.attname)
                    if pk_val is None:
                        # The pk_val and model values are the same for all
                        # DeferredAttribute classes, so we only need to do this
                        # once.
                        obj = self.__class__.__dict__[field.attname]
                        model = obj.model_ref()
        else:
            factory = simple_class_factory
        return (model_unpickle, (model, defers, factory), data)
    def _get_pk_val(self, meta=None):
        """Return the primary key value (per meta, defaulting to self._meta)."""
        if not meta:
            meta = self._meta
        return getattr(self, meta.pk.attname)
    def _set_pk_val(self, value):
        """Set the primary key attribute to value."""
        return setattr(self, self._meta.pk.attname, value)
    pk = property(_get_pk_val, _set_pk_val)
    def serializable_value(self, field_name):
        """
        Returns the value of the field name for this instance. If the field is
        a foreign key, returns the id value, instead of the object. If there's
        no Field object with this name on the model, the model attribute's
        value is returned directly.
        Used to serialize a field's value (in the serializer, or form output,
        for example). Normally, you would just access the attribute directly
        and not use this method.
        """
        try:
            field = self._meta.get_field_by_name(field_name)[0]
        except FieldDoesNotExist:
            return getattr(self, field_name)
        return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.
        """
        if force_insert and force_update:
            raise ValueError("Cannot force both insert and updating in model saving.")
        self.save_base(using=using, force_insert=force_insert, force_update=force_update)
    save.alters_data = True
    def save_base(self, raw=False, cls=None, origin=None, force_insert=False,
            force_update=False, using=None):
        """
        Does the heavy-lifting involved in saving. Subclasses shouldn't need to
        override this method. It's separate from save() in order to hide the
        need for overrides of save() to pass around internal-only parameters
        ('raw', 'cls', and 'origin').
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        # The entity only counts as existing if the pk was not reassigned
        # since construction (nonrel backends key on the pk).
        entity_exists = bool(self._entity_exists and self._original_pk == self.pk)
        connection = connections[using]
        assert not (force_insert and force_update)
        if cls is None:
            cls = self.__class__
            meta = cls._meta
            if not meta.proxy:
                origin = cls
        else:
            meta = cls._meta
        if origin and not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using)
        # If we are in a raw save, save the object exactly as presented.
        # That means that we don't try to be smart about saving attributes
        # that might have come from the parent class - we just save the
        # attributes we have been given to the class we have been given.
        # We also go through this process to defer the save of proxy objects
        # to their actual underlying model.
        if not raw or meta.proxy:
            if meta.proxy:
                org = cls
            else:
                org = None
            for parent, field in meta.parents.items():
                # At this point, parent's primary key field may be unknown
                # (for example, from administration form which doesn't fill
                # this field). If so, fill it.
                if field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None:
                    setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
                self.save_base(cls=parent, origin=org, using=using)
                if field:
                    setattr(self, field.attname, self._get_pk_val(parent._meta))
            if meta.proxy:
                return
        if not meta.proxy:
            non_pks = [f for f in meta.local_fields if not f.primary_key]
            # First, try an UPDATE. If that doesn't update anything, do an INSERT.
            pk_val = self._get_pk_val(meta)
            pk_set = pk_val is not None
            record_exists = True
            manager = cls._base_manager
            # TODO/NONREL: Some backends could emulate force_insert/_update
            # with an optimistic transaction, but since it's costly we should
            # only do it when the user explicitly wants it.
            # By adding support for an optimistic locking transaction
            # in Django (SQL: SELECT ... FOR UPDATE) we could even make that
            # part fully reusable on all backends (the current .exists()
            # check below isn't really safe if you have lots of concurrent
            # requests. BTW, and neither is QuerySet.get_or_create).
            try_update = connection.features.distinguishes_insert_from_update
            if not try_update:
                record_exists = False
            if try_update and pk_set:
                # Determine whether a record with the primary key already exists.
                if (force_update or (not force_insert and
                        manager.using(using).filter(pk=pk_val).exists())):
                    # It does already exist, so do an UPDATE.
                    if force_update or non_pks:
                        values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                        rows = manager.using(using).filter(pk=pk_val)._update(values)
                        if force_update and not rows:
                            raise DatabaseError("Forced update did not affect any rows.")
                else:
                    record_exists = False
            if not pk_set or not record_exists:
                if meta.order_with_respect_to:
                    # If this is a model with an order_with_respect_to
                    # autopopulate the _order field
                    field = meta.order_with_respect_to
                    order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
                    self._order = order_value
                if connection.features.distinguishes_insert_from_update:
                    add = True
                else:
                    add = not entity_exists
                if not pk_set:
                    if force_update:
                        raise ValueError("Cannot force an update in save() with no primary key.")
                    values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, add), connection=connection))
                        for f in meta.local_fields if not isinstance(f, AutoField)]
                else:
                    values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, add), connection=connection))
                        for f in meta.local_fields]
                record_exists = False
                update_pk = bool(meta.has_auto_field and not pk_set)
                if values:
                    # Create a new record.
                    result = manager._insert(values, return_id=update_pk, using=using)
                else:
                    # Create a new record with defaults for everything.
                    result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True, using=using)
                if update_pk:
                    setattr(self, meta.pk.attname, result)
            transaction.commit_unless_managed(using=using)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Signal that the save is complete
        if origin and not meta.auto_created:
            if connection.features.distinguishes_insert_from_update:
                created = not record_exists
            else:
                created = not entity_exists
            signals.post_save.send(sender=origin, instance=self,
                created=created, raw=raw, using=using)
        # Nonrel bookkeeping: the entity now exists under the current pk.
        self._entity_exists = True
        self._original_pk = self.pk
    save_base.alters_data = True
    def delete(self, using=None):
        """Delete this instance (and objects that depend on it) from using."""
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
        collector = Collector(using=using)
        collector.collect([self])
        collector.delete()
        # Nonrel bookkeeping: the backing entity is gone.
        self._entity_exists = False
        self._original_pk = None
    delete.alters_data = True
    def _get_FIELD_display(self, field):
        """Return the human-readable choice label for the field's value."""
        value = getattr(self, field.attname)
        return force_unicode(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        """Return the next/previous object ordered by field (pk breaks ties)."""
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = is_next and 'gt' or 'lt'
        order = not is_next and '-' or ''
        param = smart_str(getattr(self, field.attname))
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        """Return the adjacent object in an order_with_respect_to ordering
        (result is cached on the instance)."""
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = is_next and 'gt' or 'lt'
            order = not is_next and '-_order' or '_order'
            order_field = self._meta.order_with_respect_to
            obj = self._default_manager.filter(**{
                order_field.name: getattr(self, order_field.attname)
            }).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            setattr(self, cachename, obj)
        return getattr(self, cachename)
    def prepare_database_save(self, unused):
        """Return the value stored when this instance is saved into a
        related field: its primary key."""
        return self.pk
    def clean(self):
        """
        Hook for doing any extra model-wide validation after clean() has been
        called on every field by self.clean_fields. Any ValidationError raised
        by this method will not be associated with a particular field; it will
        have a special-case association with the field defined by NON_FIELD_ERRORS.
        """
        pass
    def validate_unique(self, exclude=None):
        """
        Checks unique constraints on the model and raises ``ValidationError``
        if any failed.
        """
        unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
        errors = self._perform_unique_checks(unique_checks)
        date_errors = self._perform_date_checks(date_checks)
        for k, v in date_errors.items():
            errors.setdefault(k, []).extend(v)
        if errors:
            raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.parents.keys():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                else:
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.parents.keys():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        """Run each unique check against the database; return a dict mapping
        field name (or NON_FIELD_ERRORS) to error messages."""
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs.keys()):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        """Run each unique_for_<date/year/month> check; return a dict of
        field name to error messages."""
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
    def date_error_message(self, lookup_type, field, unique_for):
        """Build the user-facing message for a failed date-uniqueness check."""
        opts = self._meta
        return _(u"%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
            'field_name': unicode(capfirst(opts.get_field(field).verbose_name)),
            'date_field': unicode(capfirst(opts.get_field(unique_for).verbose_name)),
            'lookup': lookup_type,
        }
    def unique_error_message(self, model_class, unique_check):
        """Build the user-facing message for a failed uniqueness check."""
        opts = model_class._meta
        model_name = capfirst(opts.verbose_name)
        # A unique field
        if len(unique_check) == 1:
            field_name = unique_check[0]
            field_label = capfirst(opts.get_field(field_name).verbose_name)
            # Insert the error into the error dict, very sneaky
            return _(u"%(model_name)s with this %(field_label)s already exists.") % {
                'model_name': unicode(model_name),
                'field_label': unicode(field_label)
            }
        # unique_together
        else:
            field_labels = map(lambda f: capfirst(opts.get_field(f).verbose_name), unique_check)
            field_labels = get_text_list(field_labels, _('and'))
            return _(u"%(model_name)s with this %(field_label)s already exists.") % {
                'model_name': unicode(model_name),
                'field_label': unicode(field_labels)
            }
    def full_clean(self, exclude=None):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.
        """
        errors = {}
        if exclude is None:
            exclude = []
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError, e:
            errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
    def clean_fields(self, exclude=None):
        """
        Cleans all fields and raises a ValidationError containing message_dict
        of all validation errors if any occur.
        """
        if exclude is None:
            exclude = []
        errors = {}
        for f in self._meta.fields:
            if f.name in exclude:
                continue
            # Skip validation for empty fields with blank=True. The developer
            # is responsible for making sure they have a valid value.
            raw_value = getattr(self, f.attname)
            if f.blank and raw_value in validators.EMPTY_VALUES:
                continue
            try:
                setattr(self, f.attname, f.clean(raw_value, self))
            except ValidationError, e:
                errors[f.name] = e.messages
        if errors:
            raise ValidationError(errors)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """Curried implementation of ``set_RELATED_order``: persist *id_list*
    as the new ordering of the ``ordered_obj`` rows related to *self*."""
    if using is None:
        using = DEFAULT_DB_ALIAS
    wrt_field = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, wrt_field.rel.field_name)
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    for position, pk in enumerate(id_list):
        lookup = {'pk': pk, wrt_field.name: rel_val}
        ordered_obj.objects.filter(**lookup).update(_order=position)
    transaction.commit_unless_managed(using=using)
def method_get_order(ordered_obj, self):
    """Curried implementation of ``get_RELATED_order``: return the primary
    keys of the related ``ordered_obj`` rows in their stored order."""
    wrt_field = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, wrt_field.rel.field_name)
    pk_name = ordered_obj._meta.pk.name
    rows = ordered_obj.objects.filter(**{wrt_field.name: rel_val}).values(pk_name)
    return [row[pk_name] for row in rows]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
    """Curried ``get_absolute_url`` that honours ABSOLUTE_URL_OVERRIDES.

    Looks up "app_label.module_name" in the settings override mapping and
    calls the override when present, otherwise the model's own *func*.
    """
    key = '%s.%s' % (opts.app_label, opts.module_name)
    target = settings.ABSOLUTE_URL_OVERRIDES.get(key, func)
    return target(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
    # Bare placeholder class with no attributes or behaviour of its own.
    pass
def simple_class_factory(model, attrs):
    """Identity factory for unpickling models without deferred fields.

    The indirection (rather than the default ``__reduce__`` path) works
    around a ``__deepcopy__`` problem in Python 2.4.  *attrs* is accepted
    only so the signature matches the deferred-class factory.
    """
    return model
def model_unpickle(model, attrs, factory):
    """
    Used to unpickle Model subclasses with deferred fields.
    """
    reconstructed_cls = factory(model, attrs)
    # Allocate without running __init__; pickle restores the state afterwards.
    return reconstructed_cls.__new__(reconstructed_cls)
model_unpickle.__safe_for_unpickle__ = True
if sys.version_info < (2, 5):
    # Prior to Python 2.5, Exception was an old-style class
    def subclass_exception(name, parents, unused):
        # Old-style class creation; the third argument is unused here
        # (hence the parameter name).
        return types.ClassType(name, parents, {})
else:
    def subclass_exception(name, parents, module):
        # New-style class with __module__ set so repr/pickling report the
        # owning module correctly.
        return type(name, parents, {'__module__': module})
|
davecranwell/wagtail | refs/heads/master | wagtail/tests/snippets/models.py | 5 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.models import register_snippet
# AlphaSnippet and ZuluSnippet are for testing ordering of
# snippets when registering. They are named as such to ensure
# their ordering is clear. They are registered during testing
# to ensure specific [in]correct register ordering
# AlphaSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class AlphaSnippet(models.Model):
    """Snippet used to test registration ordering (see module comment)."""
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# ZuluSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class ZuluSnippet(models.Model):
    """Snippet used to test registration ordering (see module comment)."""
    text = models.CharField(max_length=255)
    def __str__(self):
        return self.text
# Register model as snippet using register_snippet as both a function and a decorator
class RegisterFunction(models.Model):
    """Model registered as a snippet by calling register_snippet() directly."""
    pass
register_snippet(RegisterFunction)
@register_snippet
class RegisterDecorator(models.Model):
    """Model registered as a snippet via the @register_snippet decorator."""
    pass
# A snippet model that inherits from index.Indexed can be searched on
@register_snippet
class SearchableSnippet(models.Model, index.Indexed):
    """Snippet mixing in index.Indexed so it can be searched on."""
    text = models.CharField(max_length=255)
    # Only 'text' is indexed for search.
    search_fields = (
        index.SearchField('text'),
    )
    def __str__(self):
        return self.text
|
alexlo03/ansible | refs/heads/devel | lib/ansible/modules/cloud/scaleway/scaleway_security_group.py | 21 | #!/usr/bin/python
#
# Scaleway Security Group management module
#
# Copyright (C) 2018 Antoine Barbare (antoinebarbare@gmail.com).
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: scaleway_security_group
short_description: Scaleway Security Group management module
version_added: "2.8"
author: Antoine Barbare (@abarbare)
description:
- This module manages Security Group on Scaleway account
U(https://developer.scaleway.com)
extends_documentation_fragment: scaleway
options:
state:
description:
- Indicate desired state of the Security Group.
default: present
choices:
- present
- absent
organization:
description:
- Organization identifier
required: true
region:
description:
- Scaleway region to use (for example C(par1)).
required: true
choices:
- ams1
- EMEA-NL-EVS
- par1
- EMEA-FR-PAR1
name:
description:
- Name of the Security Group
required: true
description:
description:
- Description of the Security Group
stateful:
description:
- Create a stateful security group which allows established connections in and out
required: true
type: bool
inbound_default_policy:
description:
      - Default policy for incoming traffic
choices:
- accept
- drop
outbound_default_policy:
description:
      - Default policy for outgoing traffic
choices:
- accept
- drop
organization_default:
type: bool
description:
- Create security group to be the default one
'''
EXAMPLES = '''
- name: Create a Security Group
scaleway_security_group:
state: present
region: par1
name: security_group
description: "my security group description"
organization: "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9"
stateful: false
inbound_default_policy: accept
outbound_default_policy: accept
organization_default: false
register: security_group_creation_task
'''
RETURN = '''
data:
description: This is only present when C(state=present)
returned: when C(state=present)
type: dict
sample: {
"scaleway_security_group": {
"description": "my security group description",
"enable_default_security": true,
"id": "0168fb1f-cc46-4f69-b4be-c95d2a19bcae",
"inbound_default_policy": "accept",
"name": "security_group",
"organization": "43a3b6c8-916f-477b-b7ec-ff1898f5fdd9",
"organization_default": false,
"outbound_default_policy": "accept",
"servers": [],
"stateful": false
}
}
'''
from ansible.module_utils.scaleway import SCALEWAY_LOCATION, scaleway_argument_spec, Scaleway
from ansible.module_utils.basic import AnsibleModule
from uuid import uuid4
def payload_from_security_group(security_group):
    """Build the API payload: drop the 'id' key and any unset (None) values."""
    payload = {}
    for key, value in security_group.items():
        if key == 'id' or value is None:
            continue
        payload[key] = value
    return payload
def present_strategy(api, security_group):
    """Ensure the security group exists; create it when it is missing.

    Returns an Ansible result dict with 'changed' and
    'scaleway_security_group' keys. Fails the module on API errors.
    """
    response = api.get('security_groups')
    if not response.ok:
        api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))

    existing = dict((sg['name'], sg) for sg in response.json['security_groups'])
    target_name = security_group['name']

    if target_name in existing:
        # Nothing to do: report the group already on the account.
        return {'changed': False,
                'scaleway_security_group': existing[target_name]}

    if api.module.check_mode:
        # Help user when check mode is enabled by defining id key
        return {'changed': True,
                'scaleway_security_group': {'id': str(uuid4())}}

    # Create Security Group
    creation = api.post('/security_groups',
                        data=payload_from_security_group(security_group))
    if not creation.ok:
        msg = 'Error during security group creation: "%s": "%s" (%s)' % (creation.info['msg'], creation.json['message'], creation.json)
        api.module.fail_json(msg=msg)
    return {'changed': True,
            'scaleway_security_group': creation.json['security_group']}
def absent_strategy(api, security_group):
    """Ensure the security group is absent; delete it when it exists.

    Returns an Ansible result dict with a 'changed' key. Fails the module
    on API errors.
    """
    response = api.get('security_groups')
    if not response.ok:
        api.module.fail_json(msg='Error getting security groups "%s": "%s" (%s)' % (response.info['msg'], response.json['message'], response.json))

    existing = dict((sg['name'], sg) for sg in response.json['security_groups'])
    target_name = security_group['name']

    if target_name not in existing:
        # Already absent.
        return {'changed': False}

    if api.module.check_mode:
        return {'changed': True}

    deletion = api.delete('/security_groups/' + existing[target_name]['id'])
    if not deletion.ok:
        api.module.fail_json(msg='Error deleting security group "%s": "%s" (%s)' % (deletion.info['msg'], deletion.json['message'], deletion.json))
    return {'changed': True}
def core(module):
    """Assemble the security-group payload and run the requested strategy."""
    params = module.params
    security_group = {
        'organization': params['organization'],
        'name': params['name'],
        'description': params['description'],
        'stateful': params['stateful'],
        'inbound_default_policy': params['inbound_default_policy'],
        'outbound_default_policy': params['outbound_default_policy'],
        'organization_default': params['organization_default'],
    }

    # Point the API client at the region-specific endpoint.
    module.params['api_url'] = SCALEWAY_LOCATION[params['region']]['api_endpoint']
    api = Scaleway(module=module)

    if params['state'] == 'present':
        summary = present_strategy(api=api, security_group=security_group)
    else:
        summary = absent_strategy(api=api, security_group=security_group)
    module.exit_json(**summary)
def main():
    """Module entry point: declare the argument spec and dispatch to core().

    Bug fix: the boolean options previously used ``type=bool`` (the Python
    builtin), which makes Ansible call ``bool(value)`` on raw input — any
    non-empty string such as "false" would coerce to True. The documented
    spec is the string ``type='bool'``, which uses Ansible's yes/no/true/false
    coercion.
    """
    argument_spec = scaleway_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['absent', 'present']),
        organization=dict(required=True),
        name=dict(required=True),
        description=dict(),
        region=dict(required=True, choices=SCALEWAY_LOCATION.keys()),
        stateful=dict(required=True, type='bool'),
        inbound_default_policy=dict(choices=['accept', 'drop']),
        outbound_default_policy=dict(choices=['accept', 'drop']),
        organization_default=dict(type='bool'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # Default policies are only meaningful for stateful groups.
        required_if=[['stateful', True, ['inbound_default_policy', 'outbound_default_policy']]]
    )
    core(module)
if __name__ == '__main__':
main()
|
manics/openmicroscopy | refs/heads/develop | components/tools/OmeroPy/test/integration/tablestest/test_backwards_compatibility.py | 6 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Check backwards compatibility of the tables service
"""
import os.path
import bz2
import tempfile
import omero
import omero.clients
import omero.grid
from omero.testlib import ITest
import pytest
from omero import columns
from omero.rtypes import rint
class TestBackwardsCompatibility(ITest):
    """
    Integration tests checking that HDF5 tables written by OMERO 4.4.5 or
    older remain readable and updatable against a newer server.
    """
    # def setUp(self):
    #     super(BackwardsCompatibilityTest, self).setUp()
    def uploadHdf5(self, file):
        """
        Decompress the BZ2-compressed HDF5 test file and upload to server.
        file should be relative to the directory containing this file.
        """
        dir = os.path.dirname(os.path.realpath(__file__))
        file = os.path.join(dir, file)
        # delete=False: the temp file must survive close() so the client
        # can read it back by name for the upload.
        tmpf = tempfile.NamedTemporaryFile(delete=False)
        bzf = bz2.BZ2File(file)
        tmpf.write(bzf.read())
        bzf.close()
        tmpf.close()
        ofile = self.client.upload(
            tmpf.name, name=file, type='application/x-hdf')
        print "Uploaded OriginalFile:", ofile.getId().val
        return ofile
    def createMaskCol(self):
        """Return a MaskColumnI holding two rows of fixed reference values
        (paired with checkMaskCol below)."""
        mask = columns.MaskColumnI('mask', 'desc', None)
        mask.imageId = [1, 2]
        mask.theZ = [3, 4]
        mask.theT = [5, 6]
        mask.x = [7.0, 8.0]
        mask.y = [9.0, 10.0]
        mask.w = [11.0, 12.0]
        mask.h = [13.0, 14.0]
        mask.bytes = [[15], [16, 17, 18, 19, 20]]
        return mask
    def checkMaskCol(self, test):
        """Assert that mask column ``test`` matches createMaskCol()'s rows."""
        def arr(x):
            # Raw mask bytes come back as a string; view them as uint8.
            import numpy
            import tables
            return numpy.fromstring(x, count=len(x), dtype=tables.UInt8Atom())
        assert 1 == test.imageId[0]
        assert 3 == test.theZ[0]
        assert 5 == test.theT[0]
        assert 7 == test.x[0]
        assert 9 == test.y[0]
        assert 11 == test.w[0]
        assert 13 == test.h[0]
        assert [15] == arr(test.bytes[0])
        assert 2 == test.imageId[1]
        assert 4 == test.theZ[1]
        assert 6 == test.theT[1]
        assert 8 == test.x[1]
        assert 10 == test.y[1]
        assert 12 == test.w[1]
        assert 14 == test.h[1]
        x = [16, 17, 18, 19, 20]
        y = arr(test.bytes[1])
        for i in range(len(x)):
            assert x[i] == y[i]
    def testCreateAllColumns_4_4_5(self):
        """
        Call this method to create the reference HDF5 table under a 4.4.5
        or older server. The OriginalFile ID of the table will be printed,
        and can be used to find the file under ${omero.data.dir}/Files/.
        To run manually goto ``components/tools/OmeroPy``, and run:
        ``py.test test/integration/tablestest/test_backwards_compatibility.py\
        -s -k testCreateAllColumns_4_4_5``
        """
        grid = self.client.sf.sharedResources()
        repoMap = grid.repositories()
        repoObj = repoMap.descriptions[0]
        table = grid.newTable(repoObj.id.val, "/test")
        assert table
        # One column of every supported type, two rows each.
        fcol = columns.FileColumnI('filecol', 'file col')
        fcol.values = [10, 20]
        icol = columns.ImageColumnI('imagecol', 'image col')
        icol.values = [30, 40]
        rcol = columns.RoiColumnI('roicol', 'roi col')
        rcol.values = [50, 60]
        wcol = columns.WellColumnI('wellcol', 'well col')
        wcol.values = [70, 80]
        pcol = columns.PlateColumnI('platecol', 'plate col')
        pcol.values = [90, 100]
        bcol = columns.BoolColumnI('boolcol', 'bool col')
        bcol.values = [True, False]
        dcol = columns.DoubleColumnI('doublecol', 'double col')
        dcol.values = [0.25, 0.5]
        lcol = columns.LongColumnI('longcol', 'long col')
        lcol.values = [-1, -2]
        scol = columns.StringColumnI('stringcol', 'string col', 3)
        scol.values = ["abc", "de"]
        # larr = columns.LongArrayColumnI('longarr', 'longarr col', 2)
        # larr.values = [[-2, -1], [1, 2]]
        # farr = columns.FloatArrayColumnI('floatarr', 'floatarr col', 2)
        # farr.values = [[-0.25, -0.5], [0.125, 0.0625]]
        # darr = columns.DoubleArrayColumnI('doublearr', 'doublearr col', 2)
        # darr.values = [[-0.25, -0.5], [0.125, 0.0625]]
        mask = self.createMaskCol()
        cols = [fcol, icol, rcol, wcol, pcol,
                bcol, dcol, lcol, scol, mask]
        # larr, farr, darr]
        table.initialize(cols)
        table.addData(cols)
        # Read both rows back and verify every column round-tripped.
        data = table.readCoordinates([0, 1])
        testf = data.columns[0].values
        assert 10 == testf[0]
        assert 20 == testf[1]
        testi = data.columns[1].values
        assert 30 == testi[0]
        assert 40 == testi[1]
        testr = data.columns[2].values
        assert 50 == testr[0]
        assert 60 == testr[1]
        testw = data.columns[3].values
        assert 70 == testw[0]
        assert 80 == testw[1]
        testp = data.columns[4].values
        assert 90 == testp[0]
        assert 100 == testp[1]
        testb = data.columns[5].values
        assert testb[0]
        assert not testb[1]
        testd = data.columns[6].values
        assert 0.25 == testd[0]
        assert 0.5 == testd[1]
        testl = data.columns[7].values
        assert -1 == testl[0]
        assert -2 == testl[1]
        tests = data.columns[8].values
        assert "abc" == tests[0]
        assert "de" == tests[1]
        testm = data.columns[9]
        self.checkMaskCol(testm)
        # testla = data.columns[10].values
        # assert [-2, -1] == testla[0]
        # assert [1, 2] == testla[1]
        # testda = data.columns[11].values
        # assert [-0.25, -0.5] == testda[0]
        # assert [0.125, 0.0625] == testda[1]
        ofile = table.getOriginalFile()
        print "Created OriginalFile:", ofile.getId().val
    def testAllColumns_4_4_5(self):
        """
        Check whether a table created under 4.4.5 or older is still usable
        with a newer server
        """
        ofile = self.uploadHdf5("service-reference-dev_4_4_5.h5.bz2")
        grid = self.client.sf.sharedResources()
        table = grid.openTable(ofile)
        assert table
        # Expected column layout of the reference table created by
        # testCreateAllColumns_4_4_5 above.
        expectedTypes = [
            omero.grid.FileColumn,
            omero.grid.ImageColumn,
            omero.grid.RoiColumn,
            omero.grid.WellColumn,
            omero.grid.PlateColumn,
            omero.grid.BoolColumn,
            omero.grid.DoubleColumn,
            omero.grid.LongColumn,
            omero.grid.StringColumn,
            omero.grid.MaskColumn
        ]
        # omero.grid.FloatArrayColumn,
        # omero.grid.DoubleArrayColumn,
        # omero.grid.LongArrayColumn,
        expectedNames = [
            'filecol',
            'imagecol',
            'roicol',
            'wellcol',
            'platecol',
            'boolcol',
            'doublecol',
            'longcol',
            'stringcol',
            'mask'
        ]
        # 'longarr'
        # 'floatarr'
        # 'doublearr'
        headers = table.getHeaders()
        assert [type(x) for x in headers] == expectedTypes
        assert [x.name for x in headers] == expectedNames
        assert table.getNumberOfRows() == 2
        data = table.readCoordinates([0, 1])
        testf = data.columns[0].values
        assert 10 == testf[0]
        assert 20 == testf[1]
        testi = data.columns[1].values
        assert 30 == testi[0]
        assert 40 == testi[1]
        testr = data.columns[2].values
        assert 50 == testr[0]
        assert 60 == testr[1]
        testw = data.columns[3].values
        assert 70 == testw[0]
        assert 80 == testw[1]
        testp = data.columns[4].values
        assert 90 == testp[0]
        assert 100 == testp[1]
        testb = data.columns[5].values
        assert testb[0]
        assert not testb[1]
        testd = data.columns[6].values
        assert 0.25 == testd[0]
        assert 0.5 == testd[1]
        testl = data.columns[7].values
        assert -1 == testl[0]
        assert -2 == testl[1]
        tests = data.columns[8].values
        assert "abc" == tests[0]
        assert "de" == tests[1]
        testm = data.columns[9]
        self.checkMaskCol(testm)
        # testla = data.columns[10].values
        # assert [-2, -1] == testla[0]
        # assert [1, 2] == testla[1]
        # testda = data.columns[11].values
        # assert [-0.25, -0.5] == testda[0]
        # assert [0.125, 0.0625] == testda[1]
        # Now try an update
        updatel = omero.grid.LongColumn('longcol', '', [12345])
        updateData = omero.grid.Data(rowNumbers=[1], columns=[updatel])
        table.update(updateData)
        assert table.getNumberOfRows() == 2
        data2 = table.readCoordinates([0, 1])
        # Every column except 'longcol' (index 7) and the mask (index 9,
        # compared via checkMaskCol) must be unchanged by the update.
        for n in [0, 1, 2, 3, 4, 5, 6, 8]:
            assert data.columns[n].values == data2.columns[n].values
        self.checkMaskCol(data2.columns[9])
        testl2 = data2.columns[7].values
        assert -1 == testl2[0]
        assert 12345 == testl2[1]
    def testMetadataException(self):
        """
        Check whether metadata set methods are blocked on a v1 (pre-5.1) table
        """
        ofile = self.uploadHdf5("service-reference-dev_4_4_5.h5.bz2")
        grid = self.client.sf.sharedResources()
        table = grid.openTable(ofile)
        expected = 'Tables metadata is only supported for OMERO.tables >= 2'
        # NOTE(review): 'exc' is rebound by the second with-block, so only the
        # setAllMetadata message is asserted; setMetadata's is never checked.
        with pytest.raises(omero.ApiUsageException) as exc:
            table.setMetadata('a', rint(1))
        with pytest.raises(omero.ApiUsageException) as exc:
            table.setAllMetadata({'a': rint(1)})
        assert exc.value.message == expected
|
hugobuddel/orange3 | refs/heads/master | Orange/widgets/visualize/owmosaic.py | 1 | import os
import sys
from collections import defaultdict
from functools import reduce
from itertools import product
from math import sqrt
import numpy
from PyQt4.QtCore import QPoint, Qt, QRectF
from PyQt4.QtGui import (QGraphicsRectItem, QGraphicsView, QColor,
QGraphicsScene, QPainter, QIcon, QDialog, QPen,
QVBoxLayout, QListWidget, QSizePolicy, QApplication,
QGraphicsTextItem, QBrush, QGraphicsLineItem,
QGraphicsEllipseItem)
from Orange.data import Table, filter
from Orange.data.sql.table import SqlTable, LARGE_TABLE, DEFAULT_SAMPLE_TIME
from Orange.statistics.distribution import get_distribution
from Orange.widgets import gui
from Orange.widgets.settings import (Setting, DomainContextHandler,
ContextSetting)
from Orange.widgets.utils import getHtmlCompatibleString
from Orange.widgets.utils.colorpalette import ColorPaletteDlg, DefaultRGBColors
from Orange.widgets.utils.scaling import get_variable_values_sorted
from Orange.widgets.widget import OWWidget, Default
from Orange.widgets.io import FileFormats
PEARSON = 0
CLASS_DISTRIBUTION = 1
BOTTOM = 0
LEFT = 1
TOP = 2
RIGHT = 3
# using function with same name from owtools.py
# def get_variable_values_sorted(param):
# if hasattr(param, "values"):
# return param.values
# return []
class SelectionRectangle(QGraphicsRectItem):
    # Marker subclass: lets the widget distinguish the rubber-band selection
    # rectangle from ordinary plot items when clearing/redrawing the scene.
    pass
class MosaicSceneView(QGraphicsView):
    """Graphics view implementing rubber-band selection over the mosaic."""
    def __init__(self, widget, *args):
        QGraphicsView.__init__(self, *args)
        self.widget = widget          # owning OWMosaicDisplay
        self.bMouseDown = False       # left-drag in progress
        self.mouseDownPosition = QPoint(0, 0)
        self.tempRect = None          # live SelectionRectangle while dragging
    # mouse button was pressed
    def mousePressEvent(self, ev):
        QGraphicsView.mousePressEvent(self, ev)
        self.mouseDownPosition = QPoint(ev.pos().x(), ev.pos().y())
        self.bMouseDown = True
        self.mouseMoveEvent(ev)
    # mouse button was pressed and mouse is moving ######################
    def mouseMoveEvent(self, ev):
        QGraphicsView.mouseMoveEvent(self, ev)
        if ev.button() == Qt.RightButton:
            return
        if not self.bMouseDown:
            if self.tempRect:
                self.scene().removeItem(self.tempRect)
                self.tempRect = None
        else:
            if not self.tempRect:
                self.tempRect = SelectionRectangle(None, self.scene())
            # Normalize corners so the rect is valid whichever direction the
            # drag goes; width/height are kept at least 1 pixel.
            rect = QRectF(min(self.mouseDownPosition.x(), ev.pos().x()),
                          min(self.mouseDownPosition.y(), ev.pos().y()),
                          max(abs(self.mouseDownPosition.x() - ev.pos().x()), 1),
                          max(abs(self.mouseDownPosition.y() - ev.pos().y()), 1))
            self.tempRect.setRect(rect)
    # mouse button was released #########################################
    def mouseReleaseEvent(self, ev):
        self.bMouseDown = False
        self.widget.key_modifier = ev.modifiers()
        if self.tempRect:
            # A plain left-click (no Alt/Ctrl/Shift) replaces the current
            # selection; modified clicks extend it.
            if ev.button() == Qt.LeftButton and not ev.modifiers() & \
                    (Qt.AltModifier | Qt.ControlModifier | Qt.ShiftModifier):
                self.widget.selectionConditions = []
            self.widget.addSelection(self.tempRect)
            self.scene().removeItem(self.tempRect)
            self.tempRect = None
class OWMosaicDisplay(OWWidget):
    name = "Mosaic Display"
    description = "Display data in a mosaic plot."
    icon = "icons/MosaicDisplay.svg"
    # Signal declarations: main data in, optional subset in, selection out.
    inputs = [("Data", Table, "setData", Default),
              ("Data Subset", Table, "setSubsetData")]
    outputs = [("Selected Data", Table)]
    settingsHandler = DomainContextHandler()
    # Persisted display options.
    show_apriori_distribution_lines = Setting(False)
    show_apriori_distribution_boxes = Setting(True)
    use_boxes = Setting(True)
    interior_coloring = Setting(0)  # PEARSON or CLASS_DISTRIBUTION
    color_settings = Setting(None)
    selected_schema_index = Setting(0)
    show_subset_data_boxes = Setting(True)
    remove_unused_labels = Setting(True)
    # Up to four attributes shown in the mosaic; stored per data domain.
    variable1 = ContextSetting("")
    variable2 = ContextSetting("")
    variable3 = ContextSetting("")
    variable4 = ContextSetting("")
    interior_coloring_opts = ["Pearson residuals",
                              "Class distribution"]
    subboxesOpts = ["Expected distribution",
                    "Apriori distribution"]
    # Drawing constants.
    _apriori_pen_color = QColor(255, 255, 255, 128)
    _box_size = 5
    _cellspace = 4
    want_graph = True
    def __init__(self, parent=None):
        """Initialize widget state, the plot canvas and the control panel."""
        # NOTE(review): passes `self` as an extra positional argument to
        # super().__init__; confirm against OWWidget's signature.
        super().__init__(self, parent)
        # set default settings
        self.data = None
        self.unprocessed_subset_data = None
        self.subset_data = None
        self.names = []  # class values
        self.exploreAttrPermutations = 0
        self.attributeNameOffset = 20
        self.attributeValueOffset = 3
        self.residuals = []  # residual values if the residuals are visualized
        self.aprioriDistributions = []
        self.colorPalette = None
        self.permutationDict = {}
        self.manualAttributeValuesDict = {}
        self.conditionalDict = None
        self.conditionalSubsetDict = None
        self.distributionDict = None
        self.distributionSubsetDict = None
        self.activeRule = None
        self.selectionRectangle = None
        self.selectionConditions = []
        self.recentlyAdded = []
        self.key_modifier = Qt.NoModifier
        # color paletes for visualizing pearsons residuals
        self.blue_colors = [QColor(255, 255, 255), QColor(210, 210, 255),
                            QColor(110, 110, 255), QColor(0, 0, 255)]
        self.red_colors = [QColor(255, 255, 255), QColor(255, 200, 200),
                           QColor(255, 100, 100), QColor(255, 0, 0)]
        # Scene/view hosting the mosaic drawing.
        self.canvas = QGraphicsScene()
        self.canvas_view = MosaicSceneView(self, self.canvas, self.mainArea)
        self.mainArea.layout().addWidget(self.canvas_view)
        self.canvas_view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.canvas_view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.canvas_view.setRenderHint(QPainter.Antialiasing)
        # self.canvasView.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        # GUI
        # add controls to self.controlArea widget
        # self.controlArea.setMinimumWidth(235)
        box = gui.widgetBox(self.controlArea, "Variables")
        # One combo plus one "sort values" toggle button per attribute slot.
        for i in range(1, 5):
            inbox = gui.widgetBox(box, orientation="horizontal")
            combo = gui.comboBox(inbox, self, value="variable{}".format(i),
                                 # label="Variable {}".format(i),
                                 orientation="horizontal",
                                 callback=self.updateGraphAndPermList,
                                 sendSelectedValue=True, valueType=str,
                                 contentsLength=12)
            butt = gui.button(inbox, self, "", callback=self.orderAttributeValues,
                              tooltip="Change the order of attribute values")
            butt.setFixedSize(26, 24)
            butt.setCheckable(1)
            butt.setIcon(QIcon(gui.resource_filename("icons/Dlg_sort.png")))
            setattr(self, "sort{}".format(i), butt)
            setattr(self, "attr{}".format(i) + "Combo", combo)
        # self.optimizationDlg = OWMosaicOptimization(self, self.signalManager)
        # optimizationButtons = gui.widgetBox(self.GeneralTab, "Dialogs", orientation="horizontal")
        # gui.button(optimizationButtons, self, "VizRank", callback=self.optimizationDlg.reshow, debuggingEnabled=0,
        #            tooltip="Find attribute combinations that will separate different classes as clearly as possible.")
        # self.collapsableWBox = gui.collapsableWidgetBox(self.GeneralTab, "Explore Attribute Permutations", self,
        #                                                 "exploreAttrPermutations",
        #                                                 callback=self.permutationListToggle)
        # self.permutationList = gui.listBox(self.collapsableWBox, self, callback=self.setSelectedPermutation)
        # self.permutationList.hide()
        box5 = gui.widgetBox(self.controlArea, "Visual Settings")
        self.cb_color = gui.comboBox(box5, self, "interior_coloring",
                                     label="Color", orientation="horizontal",
                                     items=self.interior_coloring_opts,
                                     callback=self.updateGraph)
        gui.checkBox(box5, self, "remove_unused_labels",
                     "Remove unused attribute labels",
                     callback=self.updateGraph)
        gui.checkBox(box5, self, 'show_apriori_distribution_lines',
                     'Show apriori distribution with lines',
                     callback=self.updateGraph)
        self.box8 = gui.widgetBox(self.controlArea, "Boxes in Cells")
        self.cb_show_subset = gui.checkBox(
            self.box8, self, 'show_subset_data_boxes',
            'Show subset data distribution', callback=self.updateGraph)
        self.cb_show_subset.setDisabled(self.subset_data is None)
        cb = gui.checkBox(self.box8, self, 'use_boxes', 'Display sub-box...',
                          callback=self.updateGraph)
        ind_box = gui.indentedBox(self.box8, sep=gui.checkButtonOffsetHint(cb))
        gui.comboBox(ind_box, self, 'show_apriori_distribution_boxes',
                     items=self.subboxesOpts, callback=self.updateGraph)
        hbox = gui.widgetBox(self.controlArea, "Colors", addSpace=1)
        gui.button(hbox, self, "Set Colors", self.setColors,
                   tooltip="Set the color palette for class values")
        # self.box6.setSizePolicy(QSizePolicy(QSizePolicy.Minimum , QSizePolicy.Fixed ))
        self.controlArea.layout().addStretch(1)
        # self.graphButton.clicked.connect(saveToFileCanvas)
        self.icons = gui.attributeIconDict
        self.resize(830, 550)
        # self.VizRankLearner = MosaicTreeLearner(self.optimizationDlg)
        # self.send("Learner", self.VizRankLearner)
        # self.wdChildDialogs = [self.optimizationDlg]  # used when running widget debugging
        # self.collapsableWBox.updateControls()
        dlg = self.createColorDialog()
        self.colorPalette = dlg.getDiscretePalette("discPalette")
        self.selectionColorPalette = [QColor(*col) for col in DefaultRGBColors]
        gui.rubber(self.controlArea)
        self.graphButton.clicked.connect(self.save_graph)
def permutationListToggle(self):
if self.exploreAttrPermutations:
self.updateGraphAndPermList()
def setSelectedPermutation(self):
newRow = self.permutationList.currentRow()
if self.permutationList.count() > 0 and self.bestPlacements and newRow < len(self.bestPlacements):
self.removeAllSelections()
val, attrList, valueOrder = self.bestPlacements[newRow]
if len(attrList) > 0: self.variable1 = attrList[0]
if len(attrList) > 1: self.variable2 = attrList[1]
if len(attrList) > 2: self.variable3 = attrList[2]
if len(attrList) > 3: self.variable4 = attrList[3]
self.updateGraph(
customValueOrderDict=dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))]))
    def orderAttributeValues(self):
        """Let the user manually reorder the values of the checked attribute."""
        # Find which of the four sort buttons is toggled.
        attr = None
        if self.sort1.isChecked():
            attr = self.variable1
        elif self.sort2.isChecked():
            attr = self.variable2
        elif self.sort3.isChecked():
            attr = self.variable3
        elif self.sort4.isChecked():
            attr = self.variable4
        if self.data and attr != "" and attr != "(None)":
            # NOTE(review): SortAttributeValuesDlg is not defined in this
            # excerpt; presumably provided on the class elsewhere — confirm.
            dlg = self.SortAttributeValuesDlg(attr,
                                              self.manualAttributeValuesDict.get(attr,
                                                                                 None) or get_variable_values_sorted(
                                                  self.data.domain[attr]))
            if dlg.exec_() == QDialog.Accepted:
                self.manualAttributeValuesDict[attr] = [str(dlg.attributeList.item(i).text()) for i in
                                                        range(dlg.attributeList.count())]
        # Sort buttons are momentary: untoggle all of them and redraw.
        for control in [self.sort1, self.sort2, self.sort3, self.sort4]:
            control.setChecked(0)
        self.updateGraph()
    # initialize combo boxes with discrete attributes
    def initCombos(self, data):
        """Repopulate the four attribute combos with data's discrete attributes."""
        for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
            combo.clear()
        if data == None: return
        # Attributes 2-4 are optional, so they get a "(None)" entry.
        self.attr2Combo.addItem("(None)")
        self.attr3Combo.addItem("(None)")
        self.attr4Combo.addItem("(None)")
        for attr in data.domain:
            if attr.is_discrete:
                for combo in [self.attr1Combo, self.attr2Combo, self.attr3Combo, self.attr4Combo]:
                    combo.addItem(self.icons[attr], attr.name)
        if self.attr1Combo.count() > 0:
            self.variable1 = str(self.attr1Combo.itemText(0))
            # Preselect a second attribute when more than one is available.
            self.variable2 = str(self.attr2Combo.itemText(0 + 2 * (self.attr2Combo.count() > 2)))
            self.variable3 = str(self.attr3Combo.itemText(0))
            self.variable4 = str(self.attr4Combo.itemText(0))
# when we resize the widget, we have to redraw the data
def resizeEvent(self, e):
OWWidget.resizeEvent(self, e)
self.updateGraph()
def showEvent(self, ev):
OWWidget.showEvent(self, ev)
self.updateGraph()
    def closeEvent(self, ce):
        """Close the widget (VizRank dialog hiding is currently disabled)."""
        # self.optimizationDlg.hide()
        QDialog.closeEvent(self, ce)
# ------------- SIGNALS --------------------------
# # DATA signal - receive new data and update all fields
    def setData(self, data):
        """Handle the main 'Data' input signal and reset dependent state."""
        # Sample very large SQL tables so the widget stays responsive.
        if type(data) == SqlTable and data.approx_len() > LARGE_TABLE:
            data = data.sample_time(DEFAULT_SAMPLE_TIME)
        self.closeContext()
        self.data = data
        self.initCombos(self.data)
        self.bestPlacements = None
        self.manualAttributeValuesDict = {}
        self.attributeValuesDict = {}
        self.information([0, 1, 2])
        if not self.data:
            return
        if any(attr.is_continuous for attr in self.data.domain):
            self.information(0, "Data contains continuous variables. "
                                "Discretize the data to use them.")
        """ TODO: check
        if data.has_missing_class():
            self.information(1, "Examples with missing classes were removed.")
        if self.removeUnusedValues and len(data) != len(self.data):
            self.information(2, "Unused attribute values were removed.")
        """
        # Offer "Class distribution" coloring only when a class variable exists.
        if self.data.domain.class_var is None:
            self.cb_color.removeItem(CLASS_DISTRIBUTION)
        elif self.cb_color.count() < len(self.interior_coloring_opts):
            self.cb_color.addItem(
                self.interior_coloring_opts[CLASS_DISTRIBUTION])
        if self.data.domain.has_discrete_class:
            self.interior_coloring = CLASS_DISTRIBUTION
            self.colorPalette.set_number_of_colors(
                len(self.data.domain.class_var.values))
        else:
            self.interior_coloring = PEARSON
        self.openContext(self.data)
        # if we first received subset data
        # we now have to call setSubsetData to process it
        if self.unprocessed_subset_data:
            self.setSubsetData(self.unprocessed_subset_data)
            self.unprocessed_subset_data = None
def setSubsetData(self, data):
if not self.data:
self.unprocessed_subset_data = data
self.warning(10)
else:
try:
self.subset_data = data.from_table(self.data.domain, data)
self.warning(10)
except:
self.subset_data = None
self.warning(10, data and "'Data' and 'Data Subset'" +
" do not have compatible domains." or "")
self.cb_show_subset.setDisabled(self.subset_data is None)
    # this is called by OWBaseWidget after setData and setSubsetData are called.
    # this way the graph is updated only once
    def handleNewSignals(self):
        """Redraw once after all input signals have been processed."""
        self.updateGraphAndPermList()
# ------------------------------------------------
def setShownAttributes(self, attrList, **args):
if not attrList: return
self.variable1 = attrList[0]
if len(attrList) > 1:
self.variable2 = attrList[1]
else:
self.variable2 = "(None)"
if len(attrList) > 2:
self.variable3 = attrList[2]
else:
self.variable3 = "(None)"
if len(attrList) > 3:
self.variable4 = attrList[3]
else:
self.variable4 = "(None)"
self.attributeValuesDict = args.get("customValueOrderDict", None)
self.updateGraphAndPermList()
def getShownAttributeList(self):
attrList = [self.variable1, self.variable2, self.variable3, self.variable4]
while "(None)" in attrList: attrList.remove("(None)")
while "" in attrList: attrList.remove("")
return attrList
    def updateGraphAndPermList(self, **args):
        """Clear selections, optionally re-run permutation search, then redraw."""
        self.removeAllSelections()
        # self.permutationList.clear()
        if self.exploreAttrPermutations:
            attrList = self.getShownAttributeList()
            if not getattr(self, "bestPlacements", []) or 0 in [attr in self.bestPlacements[0][1] for attr in
                                                                attrList]:  # we might have bestPlacements for a different set of attributes
                self.setStatusBarText(
                    "Evaluating different attribute permutations. You can stop evaluation by opening VizRank dialog and pressing 'Stop optimization' button.")
                self.bestPlacements = self.optimizationDlg.optimizeCurrentAttributeOrder(attrList, updateGraph=0)
                self.setStatusBarText("")
            if self.bestPlacements:
                # Show ranked permutations and adopt the best one's value order.
                self.permutationList.addItems(
                    ["%.2f - %s" % (val, attrs) for (val, attrs, order) in self.bestPlacements])
                attrList, valueOrder = self.bestPlacements[0][1], self.bestPlacements[0][2]
                self.attributeValuesDict = dict([(attrList[i], tuple(valueOrder[i])) for i in range(len(attrList))])
        self.updateGraph(**args)
# ############################################################################
# updateGraph - gets called every time the graph has to be updated
def updateGraph(self, data=-1, subsetData=-1, attrList=-1, **args):
    """Rebuild the entire mosaic display.

    The -1 sentinels mean "use the widget's current data/subset/attribute
    selection". Recognized **args: erasePrevious (default 1), positions
    (precomputed (xOff, yOff, squareSize)), drawLegend (default 1),
    drillUpdateSelection (default 1).
    """
    # do we want to erase previous diagram?
    if args.get("erasePrevious", 1):
        for item in list(self.canvas.items()):
            if not isinstance(item, SelectionRectangle):
                self.canvas.removeItem(item)  # remove all canvas items, except SelectionCurves
        self.names = []
    if data == -1:
        data = self.data
    if subsetData == -1:
        subsetData = self.subset_data
    if attrList == -1:
        attrList = [self.variable1, self.variable2, self.variable3, self.variable4]
    if data == None: return
    # drop the placeholders of unused attribute slots
    while "(None)" in attrList: attrList.remove("(None)")
    while "" in attrList: attrList.remove("")
    if attrList == []:
        return
    selectList = attrList
    # Restrict the table to the shown attributes (+ class). The slicing
    # loses class_var (and, for in-memory tables, the table name), so both
    # are saved and restored around it.
    if type(data) == SqlTable and data.domain.class_var:
        cv = data.domain.class_var
        data = data[:, attrList + [data.domain.class_var]]
        data.domain.class_var = cv
    elif data.domain.class_var:
        cv = data.domain.class_var
        name = data.name
        data = data[:, attrList + [data.domain.class_var]]
        data.domain.class_var = cv
        data.name = name
    else:
        data = data[:, attrList]
    # TODO: check whether missing values need to be dropped here
    # data = Preprocessor_dropMissing(data)
    if len(data) == 0:
        self.warning(5,
                     "No data instances with valid values for currently visualized attributes.")
        return
    else:
        self.warning(5)
    self.aprioriDistributions = []
    if self.interior_coloring == PEARSON:
        self.aprioriDistributions = [get_distribution(data, attr) for attr in attrList]

    def get_max_label_width(attr):
        # Width in pixels of the widest value label of `attr`
        # (measured via hidden canvas text items).
        values = self.attributeValuesDict.get(attr, None) or get_variable_values_sorted(self.data.domain[attr])
        maxw = 0
        for val in values:
            t = OWCanvasText(self.canvas, str(val), 0, 0, bold=0, show=False)
            maxw = max(int(t.boundingRect().width()), maxw)
        return maxw

    if args.get("positions"):
        xOff, yOff, squareSize = args.get("positions")
    else:
        # get the maximum width of rectangle
        xOff = 20
        width = 20
        if len(attrList) > 1:
            text = OWCanvasText(self.canvas, attrList[1], bold=1, show=0)
            self.max_ylabel_w1 = min(get_max_label_width(attrList[1]), 150)
            width = 5 + text.boundingRect().height() + self.attributeValueOffset + self.max_ylabel_w1
            xOff = width
            if len(attrList) == 4:
                text = OWCanvasText(self.canvas, attrList[3], bold=1, show=0)
                self.max_ylabel_w2 = min(get_max_label_width(attrList[3]), 150)
                width += text.boundingRect().height() + self.attributeValueOffset + self.max_ylabel_w2 - 10
        # get the maximum height of rectangle
        height = 100
        yOff = 45
        squareSize = min(self.canvas_view.width() - width - 20, self.canvas_view.height() - height - 20)
    if squareSize < 0: return  # canvas is too small to draw rectangles
    self.canvas_view.setSceneRect(0, 0, self.canvas_view.width(), self.canvas_view.height())
    self.legend = {}  # dictionary that tells us, for what attributes did we already show the legend
    for attr in attrList:
        self.legend[attr] = 0
    self.drawnSides = dict([(0, 0), (1, 0), (2, 0), (3, 0)])
    self.drawPositions = {}
    if not getattr(self, "attributeValuesDict", None):
        self.attributeValuesDict = self.manualAttributeValuesDict
    # compute distributions
    self.conditionalDict, self.distributionDict = self.getConditionalDistributions(data, attrList)
    self.conditionalSubsetDict = self.distributionSubsetDict = None
    if subsetData:
        self.conditionalSubsetDict, self.distributionSubsetDict = \
            self.getConditionalDistributions(subsetData, attrList)
    # draw rectangles
    self.DrawData(attrList, (xOff, xOff + squareSize), (yOff, yOff + squareSize), 0, "", len(attrList), **args)
    if args.get("drawLegend", 1):
        self.DrawLegend(data, (xOff, xOff + squareSize), (yOff, yOff + squareSize))  # draw class legend
    if args.get("drillUpdateSelection", 1):
        # self.optimizationDlg.mtUpdateState()
        pass
    # self.canvas.update()
# create a dictionary with all possible pairs of "combination-of-attr-values" : count
## TODO: this function is used both in owmosaic and owsieve --> where to put it?
def getConditionalDistributions(self, data, attrs):
    """Return (cond_dist, dist) for every prefix of attrs (+ class).

    cond_dist maps "val1-val2-...-valK" keys (values of the first K
    attributes, joined by '-') to instance counts; dist maps the last
    value of each key to the same counts. The "" key holds len(data).
    For SqlTable the counts come from a GROUP BY query; otherwise from
    per-combination FilterDiscrete filtering.
    ## TODO: this function is used both in owmosaic and owsieve --> where to put it?
    """
    cond_dist = defaultdict(int)
    dist = defaultdict(int)
    cond_dist[""] = dist[""] = len(data)
    all_attrs = [data.domain[a] for a in attrs]
    if data.domain.class_var is not None:
        all_attrs.append(data.domain.class_var)
    for i in range(1, len(all_attrs) + 1):
        attr = all_attrs[:i]  # current attribute prefix
        if type(data) == SqlTable:
            # make all possible pairs of attributes + class_var
            attr = [a.to_sql() for a in attr]
            fields = attr + ["COUNT(*)"]
            query = data._sql_query(fields, group_by=attr)
            with data._execute_sql_query(query) as cur:
                res = cur.fetchall()
            for r in res:
                str_values = [a.repr_val(a.to_val(x)) for a, x in zip(all_attrs, r[:-1])]
                # '?' marks a missing value in the SQL result
                str_values = [x if x != '?' else 'None' for x in str_values]
                cond_dist['-'.join(str_values)] = r[-1]
                dist[str_values[-1]] += r[-1]
        else:
            # enumerate every combination of values of the prefix attributes
            for indices in product(*(range(len(a.values)) for a in attr)):
                vals = []
                conditions = []
                for k, ind in enumerate(indices):
                    vals.append(attr[k].values[ind])
                    fd = filter.FilterDiscrete(column=attr[k], values=[attr[k].values[ind]])
                    conditions.append(fd)
                filt = filter.Values(conditions)
                filtdata = filt(data)
                cond_dist['-'.join(vals)] = len(filtdata)
                dist[vals[-1]] += len(filtdata)
    return cond_dist, dist
# ############################################################################
# ############################################################################
## DRAW DATA - draw rectangles for attributes in attrList inside rect (x0,x1), (y0,y1)
def DrawData(self, attrList, x0_x1, y0_y1, side, condition, totalAttrs, used_attrs=[], used_vals=[],
             attrVals="", **args):
    """Recursively draw mosaic rectangles for attrList inside (x0,x1)x(y0,y1).

    Each level splits the rectangle along one axis (even `side` -> x axis,
    odd -> y axis) proportionally to the counts of attrList[0]'s values,
    then recurses on the remaining attributes. `condition` accumulates the
    human-readable HTML condition and `attrVals` the "v1-v2-..." key into
    self.conditionalDict.
    NOTE(review): used_attrs/used_vals are mutable defaults; they are never
    mutated here (only copied via `+`), so the idiom is benign.
    """
    x0, x1 = x0_x1
    y0, y1 = y0_y1
    # empty cell: draw placeholder rect, record label position, stop recursing
    if self.conditionalDict[attrVals] == 0:
        self.addRect(x0, x1, y0, y1, "", used_attrs, used_vals, attrVals=attrVals)
        # store coordinates for later drawing of labels
        self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, used_attrs, used_vals,
                      attrVals)
        return
    attr = attrList[0]
    edge = len(attrList) * self._cellspace  # how much smaller rectangles do we draw
    values = self.attributeValuesDict.get(attr, None) or get_variable_values_sorted(self.data.domain[attr])
    if side % 2: values = values[::-1]  # reverse names if necessary
    if side % 2 == 0:  # we are drawing on the x axis
        # we remove the space needed for separating different attr. values
        whole = max(0, (x1 - x0) - edge * (
            len(values) - 1))
        if whole == 0: edge = (x1 - x0) / float(len(values) - 1)
    else:  # we are drawing on the y axis
        whole = max(0, (y1 - y0) - edge * (len(values) - 1))
        if whole == 0: edge = (y1 - y0) / float(len(values) - 1)
    if attrVals == "":
        counts = [self.conditionalDict[val] for val in values]
    else:
        counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
    total = sum(counts)
    # if we are visualizing the third attribute and the first attribute has the last value, we have to reverse the order in which the boxes will be drawn
    # otherwise, if the last cell, nearest to the labels of the fourth attribute, is empty, we wouldn't be able to position the labels
    valRange = list(range(len(values)))
    if len(attrList + used_attrs) == 4 and len(used_attrs) == 2:
        attr1Values = self.attributeValuesDict.get(used_attrs[0], None) or get_variable_values_sorted(
            self.data.domain[used_attrs[0]])
        if used_vals[0] == attr1Values[-1]:
            valRange = valRange[::-1]
    for i in valRange:
        # proportional share of `whole` for values[:i] plus accumulated cell gaps
        start = i * edge + whole * float(sum(counts[:i]) / float(total))
        end = i * edge + whole * float(sum(counts[:i + 1]) / float(total))
        val = values[i]
        htmlVal = getHtmlCompatibleString(val)
        if attrVals != "":
            newAttrVals = attrVals + "-" + val
        else:
            newAttrVals = val
        if side % 2 == 0:  # if we are moving horizontally
            if len(attrList) == 1:
                self.addRect(x0 + start, x0 + end, y0, y1,
                             condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", used_attrs + [attr],
                             used_vals + [val], newAttrVals, **args)
            else:
                self.DrawData(attrList[1:], (x0 + start, x0 + end), (y0, y1), side + 1,
                              condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs,
                              used_attrs + [attr], used_vals + [val], newAttrVals, **args)
        else:
            if len(attrList) == 1:
                self.addRect(x0, x1, y0 + start, y0 + end,
                             condition + 4 * " " + attr + ": <b> " + htmlVal + "</b><br>", used_attrs + [attr],
                             used_vals + [val], newAttrVals, **args)
            else:
                self.DrawData(attrList[1:], (x0, x1), (y0 + start, y0 + end), side + 1,
                              condition + 4 * " " + attr + ": <b>" + htmlVal + "</b><br>", totalAttrs,
                              used_attrs + [attr], used_vals + [val], newAttrVals, **args)
    self.DrawText(side, attrList[0], (x0, x1), (y0, y1), totalAttrs, used_attrs, used_vals, attrVals)
######################################################################
## DRAW TEXT - draw legend for all attributes in attrList and their possible values
def DrawText(self, side, attr, x0_x1, y0_y1, totalAttrs, used_attrs, used_vals, attrVals):
    """Draw the value labels and the attribute name along one mosaic side.

    Each side (0=bottom, 1=left, 2=top, 3=right) is labeled at most once
    per redraw (tracked in self.drawnSides). If the current cell is empty,
    the coordinates are remembered in self.drawPositions and drawing is
    retried from a later, non-empty cell.
    """
    x0, x1 = x0_x1
    y0, y1 = y0_y1
    if self.drawnSides[side]: return
    # the text on the right will be drawn when we are processing visualization of the last value of the first attribute
    if side == RIGHT:
        attr1Values = self.attributeValuesDict.get(used_attrs[0], None) or get_variable_values_sorted(
            self.data.domain[used_attrs[0]])
        if used_vals[0] != attr1Values[-1]:
            return
    if not self.conditionalDict[attrVals]:
        if side not in self.drawPositions: self.drawPositions[side] = (x0, x1, y0, y1)
        return
    else:
        if side in self.drawPositions:
            # restore the positions where we have to draw the attribute values and attribute name
            (x0, x1, y0, y1) = self.drawPositions[side]
    self.drawnSides[side] = 1
    values = self.attributeValuesDict.get(attr, None) or get_variable_values_sorted(self.data.domain[attr])
    if side % 2: values = values[::-1]
    # usable span once inter-cell spacing is subtracted (x for even sides, y for odd)
    width = x1 - x0 - (side % 2 == 0) * self._cellspace * (totalAttrs - side) * (len(values) - 1)
    height = y1 - y0 - (side % 2 == 1) * self._cellspace * (totalAttrs - side) * (len(values) - 1)
    # calculate position of first attribute
    currPos = 0
    if attrVals == "":
        counts = [self.conditionalDict.get(val, 1) for val in values]
    else:
        counts = [self.conditionalDict.get(attrVals + "-" + val, 1) for val in values]
    total = sum(counts)
    if total == 0:
        counts = [1] * len(values)
        total = sum(counts)
    for i in range(len(values)):
        val = values[i]
        perc = counts[i] / float(total)
        hide_value = self.remove_unused_labels and self.distributionDict[val] == 0
        if not hide_value:
            # center the label within this value's proportional segment
            if side == 0:
                OWCanvasText(self.canvas, str(val), x0 + currPos + width * 0.5 * perc,
                             y1 + self.attributeValueOffset,
                             Qt.AlignTop | Qt.AlignHCenter, bold=0)
            elif side == 1:
                OWCanvasText(self.canvas, str(val), x0 - self.attributeValueOffset,
                             y0 + currPos + height * 0.5 * perc,
                             Qt.AlignRight | Qt.AlignVCenter, bold=0)
            elif side == 2:
                OWCanvasText(self.canvas, str(val), x0 + currPos + width * perc * 0.5,
                             y0 - self.attributeValueOffset,
                             Qt.AlignHCenter | Qt.AlignBottom, bold=0)
            else:
                OWCanvasText(self.canvas, str(val), x1 + self.attributeValueOffset,
                             y0 + currPos + height * 0.5 * perc,
                             Qt.AlignLeft | Qt.AlignVCenter, bold=0)
        if side % 2 == 0:
            currPos += perc * width + self._cellspace * (totalAttrs - side)
        else:
            currPos += perc * height + self._cellspace * (totalAttrs - side)
    # finally, the attribute name itself, outside the value labels
    if side == 0:
        OWCanvasText(self.canvas, attr, x0 + (x1 - x0) / 2,
                     y1 + self.attributeValueOffset + self.attributeNameOffset, Qt.AlignTop | Qt.AlignHCenter,
                     bold=1)
    elif side == 1:
        OWCanvasText(self.canvas, attr, x0 - self.max_ylabel_w1 - self.attributeValueOffset, y0 + (y1 - y0) / 2,
                     Qt.AlignRight | Qt.AlignVCenter, bold=1, vertical=True)
    elif side == 2:
        OWCanvasText(self.canvas, attr, x0 + (x1 - x0) / 2,
                     y0 - self.attributeValueOffset - self.attributeNameOffset, Qt.AlignBottom | Qt.AlignHCenter,
                     bold=1)
    else:
        OWCanvasText(self.canvas, attr, x1 + self.max_ylabel_w2 + self.attributeValueOffset, y0 + (y1 - y0) / 2,
                     Qt.AlignLeft | Qt.AlignVCenter, bold=1, vertical=True)
# draw a rectangle, set it to back and add it to rect list
def addRect(self, x0, x1, y0, y1, condition="", used_attrs=None, used_vals=None, attrVals="", **args):
    """Draw one mosaic cell: its outline, interior coloring (Pearson
    residual or class distribution), selection markers, subset markers
    and tooltip.

    `used_attrs`/`used_vals` identify the cell; `attrVals` is its
    "v1-v2-..." key into self.conditionalDict. Recognized keyword:
    selectionDict - mapping of value tuples to highlight pen colors.
    """
    # Fix: avoid shared mutable default arguments (original used `[]`).
    if used_attrs is None:
        used_attrs = []
    if used_vals is None:
        used_vals = []
    # degenerate rectangles would be invisible; give them at least 1px
    if x0 == x1:
        x1 += 1
    if y0 == y1:
        y1 += 1
    if x1 - x0 + y1 - y0 == 2:
        y1 += 1  # if we want to show a rectangle of width and height 1 it doesn't show anything. in such cases we therefore have to increase size of one edge
    if ("selectionDict" in args and
            tuple(used_vals) in args["selectionDict"]):
        d = 2
        OWCanvasRectangle(self.canvas, x0 - d, y0 - d, x1 - x0 + 1 + 2 * d, y1 - y0 + 1 + 2 * d,
                          penColor=args["selectionDict"][tuple(used_vals)], penWidth=2, z=-100)
    # if we have selected a rule that contains this combination of attr values then show a kind of selection of this rectangle
    if self.activeRule and len(used_attrs) == len(self.activeRule[0]) and sum(
            [v in used_attrs for v in self.activeRule[0]]) == len(self.activeRule[0]):
        for vals in self.activeRule[1]:
            if used_vals == [vals[self.activeRule[0].index(a)] for a in used_attrs]:
                # BUG FIX: was `self.data.domain.classVar.name` (Orange 2
                # spelling); this file's Orange 3 domains expose `class_var`
                # (see e.g. the class-distribution branch below), so the old
                # attribute raised AttributeError whenever a rule was active.
                values = list(
                    self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
                    self.data.domain.class_var)
                counts = [self.conditionalDict[attrVals + "-" + val] for val in values]
                d = 2
                r = OWCanvasRectangle(self.canvas, x0 - d, y0 - d, x1 - x0 + 2 * d + 1, y1 - y0 + 2 * d + 1, z=50)
                r.setPen(QPen(self.colorPalette[counts.index(max(counts))], 2, Qt.DashLine))
    aprioriDist = ()
    pearson = None
    expected = None
    outerRect = OWCanvasRectangle(self.canvas, x0, y0, x1 - x0, y1 - y0, z=30)
    if not self.conditionalDict[attrVals]: return
    # we have to remember which conditions were new in this update so that
    # when we right click we can only remove the last added selections
    if self.selectionRectangle is not None and \
            self.selectionRectangle.collidesWithItem(outerRect):
        if tuple(used_vals) in self.selectionConditions and \
                self.key_modifier & (Qt.AltModifier
                                     | Qt.ControlModifier):
            self.selectionConditions.remove(tuple(used_vals))
        elif tuple(used_vals) not in self.selectionConditions:
            self.recentlyAdded += [tuple(used_vals)]
            if self.key_modifier & (Qt.ControlModifier | Qt.ShiftModifier):
                self.selectionConditions = self.selectionConditions \
                    + [tuple(used_vals)]
            elif not self.key_modifier & (Qt.AltModifier | Qt.ShiftModifier
                                          | Qt.ControlModifier):
                self.selectionConditions = self.recentlyAdded
    # show rectangle selected or not
    if tuple(used_vals) in self.selectionConditions:
        outerRect.setPen(QPen(Qt.black, 3, Qt.DotLine))
    if (self.interior_coloring == CLASS_DISTRIBUTION and
            not self.data.domain.has_discrete_class):
        return
    # draw pearsons residuals
    if (self.interior_coloring == PEARSON or
            not self.data.domain.has_discrete_class):
        s = sum(self.aprioriDistributions[0])
        # expected count under independence of the used attributes
        expected = s * reduce(lambda x, y: x * y,
                              [self.aprioriDistributions[i][used_vals[i]] / float(s) for i in
                               range(len(used_vals))])
        actual = self.conditionalDict[attrVals]
        pearson = float(actual - expected) / sqrt(expected)
        # bin |residual| into 4 shades
        if abs(pearson) < 2:
            ind = 0
        elif abs(pearson) < 4:
            ind = 1
        elif abs(pearson) < 8:
            ind = 2
        else:
            ind = 3
        if pearson > 0:
            color = self.blue_colors[ind]
        else:
            color = self.red_colors[ind]
        OWCanvasRectangle(self.canvas, x0, y0, x1 - x0, y1 - y0, color, color, z=-20)
    # draw class distribution - actual and apriori
    # we do have a discrete class
    else:
        clsValues = list(
            self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
            self.data.domain.class_var)
        aprioriDist = get_distribution(self.data, self.data.domain.class_var.name)
        total = 0
        for i in range(len(clsValues)):
            val = self.conditionalDict[attrVals + "-" + clsValues[i]]
            if val == 0:
                continue
            # give the last value whatever height remains, avoiding rounding gaps
            if i == len(clsValues) - 1:
                v = y1 - y0 - total
            else:
                v = ((y1 - y0) * val) / self.conditionalDict[attrVals]
            OWCanvasRectangle(self.canvas, x0, y0 + total, x1 - x0, v, self.colorPalette[i],
                              self.colorPalette[i], z=-20)
            total += v
        # show apriori boxes and lines
        if (self.show_apriori_distribution_lines or self.use_boxes) and \
                abs(x1 - x0) > self._box_size and \
                abs(y1 - y0) > self._box_size:
            apriori = [aprioriDist[val] / float(len(self.data))
                       for val in clsValues]
            if self.show_apriori_distribution_boxes or \
                    self.data.domain.class_var.name in used_attrs:
                box_counts = apriori
            else:
                contingencies = \
                    self.optimizationDlg.getContingencys(used_attrs)
                box_counts = []
                for clsVal in clsValues:
                    # compute: P(c_i) * prod (P(c_i|attr_k) / P(c_i))
                    # for each class value
                    pci = aprioriDist[clsVal] / float(sum(aprioriDist.values()))
                    tempVal = pci
                    if pci > 0:
                        # tempVal = 1.0 / Pci
                        for ua, uv in zip(used_attrs, used_vals):
                            tempVal *= contingencies[ua][uv] / pci
                    box_counts.append(tempVal)
                    # boxCounts.append(aprioriDist[val]/float(sum(aprioriDist.values())) * reduce(operator.mul, [contingencies[used_attrs[i]][used_vals[i]][clsVal]/float(sum(contingencies[used_attrs[i]][used_vals[i]].values())) for i in range(len(used_attrs))]))
            total1 = 0
            total2 = 0
            if self.use_boxes:
                OWCanvasLine(self.canvas, x0 + self._box_size, y0, x0 + self._box_size, y1, z=30)
            for i in range(len(clsValues)):
                val1 = apriori[i]
                if self.show_apriori_distribution_boxes:
                    val2 = apriori[i]
                else:
                    val2 = box_counts[i] / float(sum(box_counts))
                if i == len(clsValues) - 1:
                    v1 = y1 - y0 - total1
                    v2 = y1 - y0 - total2
                else:
                    v1 = (y1 - y0) * val1
                    v2 = (y1 - y0) * val2
                x, y, w, h, xL1, yL1, xL2, yL2 = x0, y0 + total2, self._box_size, v2, x0, y0 + total1 + v1, x1, y0 + total1 + v1
                if self.use_boxes:
                    OWCanvasRectangle(self.canvas, x, y, w, h, self.colorPalette[i], self.colorPalette[i], z=20)
                if i < len(clsValues) - 1 and self.show_apriori_distribution_lines:
                    OWCanvasLine(self.canvas, xL1, yL1, xL2, yL2, z=10, penColor=self._apriori_pen_color)
                total1 += v1
                total2 += v2
        # show subset distribution
        if self.conditionalSubsetDict:
            # show a rect around the box if subset examples belong to this box
            if self.conditionalSubsetDict[attrVals]:
                # counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
                # if sum(counts) == 1: color = self.colorPalette[counts.index(1)]
                # else: color = Qt.black
                # OWCanvasRectangle(self.canvas, x0-2, y0-2, x1-x0+5, y1-y0+5, color, QColor(Qt.white), penWidth = 2, z=-50, penStyle = Qt.DashLine)
                counts = [self.conditionalSubsetDict[attrVals + "-" + val] for val in clsValues]
                if sum(counts) == 1:
                    OWCanvasRectangle(self.canvas, x0 - 2, y0 - 2, x1 - x0 + 5, y1 - y0 + 5,
                                      self.colorPalette[counts.index(1)], QColor(Qt.white), penWidth=2, z=-50,
                                      penStyle=Qt.DashLine)
                if self.show_subset_data_boxes:  # do we want to show exact distribution in the right edge of each cell
                    OWCanvasLine(self.canvas, x1 - self._box_size, y0, x1 - self._box_size, y1, z=30)
                    total = 0
                    for i in range(len(aprioriDist)):
                        val = self.conditionalSubsetDict[attrVals + "-" + clsValues[i]]
                        if not self.conditionalSubsetDict[attrVals] or val == 0: continue
                        if i == len(aprioriDist) - 1:
                            v = y1 - y0 - total
                        else:
                            v = ((y1 - y0) * val) / float(self.conditionalSubsetDict[attrVals])
                        OWCanvasRectangle(self.canvas, x1 - self._box_size, y0 + total, self._box_size, v,
                                          self.colorPalette[i], self.colorPalette[i], z=15)
                        total += v
    # build the cell tooltip from whichever statistics were computed above
    tooltipText = "Examples in this area have:<br>" + condition
    if any(aprioriDist):
        clsValues = list(
            self.attributeValuesDict.get(self.data.domain.class_var.name, [])) or get_variable_values_sorted(
            self.data.domain.class_var)
        actual = [self.conditionalDict[attrVals + "-" + clsValues[i]] for i in range(len(aprioriDist))]
        if sum(actual) > 0:
            apriori = [aprioriDist[key] for key in clsValues]
            aprioriText = ""
            actualText = ""
            text = ""
            for i in range(len(clsValues)):
                text += 4 * " " + "<b>%s</b>: %d / %.1f%% (Expected %.1f / %.1f%%)<br>" % (
                    clsValues[i], actual[i], 100.0 * actual[i] / float(sum(actual)),
                    (apriori[i] * sum(actual)) / float(sum(apriori)), 100.0 * apriori[i] / float(sum(apriori)))
            tooltipText += "Number of examples: " + str(int(sum(actual))) + "<br> Class distribution:<br>" + text[
                :-4]
    elif pearson and expected:
        tooltipText += "<hr>Expected number of examples: %.1f<br>Actual number of examples: %d<br>Standardized (Pearson) residual: %.1f" % (
            expected, self.conditionalDict[attrVals], pearson)
    outerRect.setToolTip(tooltipText)
# draw the class legend below the square
def DrawLegend(self, data, x0_x1, y0_y1):
    """Draw the color legend (class values or Pearson residual bins) below
    the mosaic square."""
    x0, x1 = x0_x1
    y0, y1 = y0_y1
    # no legend when class-distribution coloring has no discrete class to color by
    if (self.interior_coloring == CLASS_DISTRIBUTION and
            data.domain.has_continuous_class):
        return
    if self.interior_coloring == PEARSON:
        names = ["<-8", "-8:-4", "-4:-2", "-2:2", "2:4", "4:8", ">8", "Residuals:"]
        colors = self.red_colors[::-1] + self.blue_colors[1:]
    else:
        # last entry ("<class name>:") is the legend title, drawn first
        names = (list(self.attributeValuesDict.get(data.domain.class_var.name, [])) or get_variable_values_sorted(
            data.domain.class_var)) + [data.domain.class_var.name + ":"]
        colors = [self.colorPalette[i] for i in range(len(data.domain.class_var.values))]
    self.names = [OWCanvasText(self.canvas, name, alignment=Qt.AlignVCenter) for name in names]
    totalWidth = sum([text.boundingRect().width() for text in self.names])
    # compute the x position of the center of the legend
    y = y1 + self.attributeNameOffset + self.attributeValueOffset + 35
    distance = 30
    startX = (x0 + x1) / 2 - (totalWidth + (len(names)) * distance) / 2
    self.names[-1].setPos(startX + 15, y)
    self.names[-1].show()
    xOffset = self.names[-1].boundingRect().width() + distance
    size = 8  # 8 + 8*(self.interiorColoring == PEARSON)
    for i in range(len(names) - 1):
        if self.interior_coloring == PEARSON:
            edgeColor = Qt.black
        else:
            edgeColor = colors[i]
        # color swatch followed by its label
        OWCanvasRectangle(self.canvas, startX + xOffset, y - size / 2, size, size, edgeColor, colors[i])
        self.names[i].setPos(startX + xOffset + 10, y)
        xOffset += distance + self.names[i].boundingRect().width()
# def saveToFileCanvas(self):
# sizeDlg = OWDlgs.OWChooseImageSizeDlg(self.canvas, parent=self)
# sizeDlg.exec_()
def setColors(self):
    """Open the palette dialog and, if accepted, apply and persist the
    chosen color scheme, then redraw."""
    dlg = self.createColorDialog()
    if dlg.exec_():
        self.color_settings = dlg.getColorSchemas()
        self.selected_schema_index = dlg.selectedSchemaIndex
        self.colorPalette = dlg.getDiscretePalette("discPalette")
        # ensure one palette color per class value
        if self.data and self.data.domain.has_discrete_class:
            self.colorPalette.set_number_of_colors(len(self.data.domain.class_var.values))
        self.updateGraph()
def createColorDialog(self):
    """Build and return the color-palette configuration dialog,
    pre-loaded with the widget's stored schemas."""
    dialog = ColorPaletteDlg(self, "Color Palette")
    # defaultColorBrewerPalette was the alternative default here
    dialog.createDiscretePalette("discPalette", "Discrete Palette",
                                 DefaultRGBColors)
    dialog.setColorSchemas(self.color_settings, self.selected_schema_index)
    return dialog
# ########################################
# cell/example selection
def sendSelectedData(self):
    """Send the rows matching the current cell selection on the
    "Selected Data" channel (None when nothing is selectable)."""
    selected = None
    if self.data and not isinstance(self.data, SqlTable):
        shown = self.getShownAttributeList()
        matching = []
        for idx, row in enumerate(self.data):
            for cond in self.selectionConditions:
                # a row matches a condition when every zipped (attr, value)
                # pair agrees
                hits = sum(1 for attr, val in zip(shown, cond) if row[attr] == val)
                if hits == len(cond):
                    matching.append(idx)
        selected = Table.from_table_rows(self.data, matching)
    self.send("Selected Data", selected)
# add a new rectangle. update the graph and see which mosaics does it intersect. add this mosaics to the recentlyAdded list
def addSelection(self, rect):
    """Register a new selection rectangle: redraw so intersected mosaics
    are collected into recentlyAdded, then send the selection."""
    self.selectionRectangle = rect
    self.updateGraph(drillUpdateSelection=0)
    self.sendSelectedData()
    self.recentlyAdded = []
    # self.optimizationDlg.mtUpdateState() # we have already called this in self.updateGraph() call
    self.selectionRectangle = None
def removeAllSelections(self):
    """Clear every selection condition and re-send the (now empty) selection."""
    self.selectionConditions = []
    ## self.optimizationDlg.mtUpdateState() # removeAllSelections is always called before updateGraph() - where mtUpdateState is called
    self.sendSelectedData()
def saveSettings(self):
    """Persist widget settings via the base class."""
    OWWidget.saveSettings(self)
    # self.optimizationDlg.saveSettings()
class SortAttributeValuesDlg(OWWidget):
    """Modal dialog for manually reordering the values of one attribute
    via a drag-and-drop list with up/down buttons."""
    name = "Sort Attribute Values"

    def __init__(self, attr="", valueList=[]):
        """Populate the list with `valueList` for attribute `attr`.
        NOTE(review): valueList is a mutable default; it is only read here,
        so the idiom is benign."""
        super().__init__(self)
        box1 = gui.widgetBox(self, "Select Value Order for Attribute \"" + attr + '"', orientation="horizontal")
        self.attributeList = gui.listBox(box1, self, selectionMode=QListWidget.ExtendedSelection, enableDragDrop=1)
        self.attributeList.addItems(valueList)
        vbox = gui.widgetBox(box1, "", orientation="vertical")
        self.buttonUPAttr = gui.button(vbox, self, "", callback=self.moveAttrUP,
                                       tooltip="Move selected attribute values up")
        self.buttonDOWNAttr = gui.button(vbox, self, "", callback=self.moveAttrDOWN,
                                         tooltip="Move selected attribute values down")
        self.buttonUPAttr.setIcon(QIcon(gui.resource_filename("icons/Dlg_up3.png")))
        self.buttonUPAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
        self.buttonUPAttr.setFixedWidth(40)
        self.buttonDOWNAttr.setIcon(QIcon(gui.resource_filename("icons/Dlg_down3.png")))
        self.buttonDOWNAttr.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding))
        self.buttonDOWNAttr.setFixedWidth(40)
        box2 = gui.widgetBox(self, 1, orientation="horizontal")
        self.okButton = gui.button(box2, self, "OK", callback=self.accept)
        self.cancelButton = gui.button(box2, self, "Cancel", callback=self.reject)
        self.resize(300, 300)

    # move selected attribute values
    def moveAttrUP(self):
        """Move each selected item one position up (insert a copy above,
        remove the original, keep it selected)."""
        for i in range(1, self.attributeList.count()):
            if self.attributeList.item(i).isSelected():
                self.attributeList.insertItem(i - 1, self.attributeList.item(i).text())
                self.attributeList.takeItem(i + 1)
                self.attributeList.item(i - 1).setSelected(True)

    def moveAttrDOWN(self):
        """Move each selected item one position down; iterates bottom-up so
        an item is not moved twice in one pass."""
        for i in range(self.attributeList.count() - 2, -1, -1):
            if self.attributeList.item(i).isSelected():
                self.attributeList.insertItem(i + 2, self.attributeList.item(i).text())
                self.attributeList.item(i + 2).setSelected(True)
                self.attributeList.takeItem(i)
def save_graph(self):
    """Open the save-image dialog for the current canvas scene."""
    # local import keeps the widget importable without the save module
    from Orange.widgets.data.owsave import OWSave

    save_img = OWSave(parent=self, data=self.canvas,
                      file_formats=FileFormats.img_writers)
    save_img.exec_()
class OWCanvasText(QGraphicsTextItem):
    """QGraphicsTextItem that positions itself according to an alignment
    flag and supports optional bold/vertical (rotated -90) rendering."""

    def __init__(self, canvas, text="", x=0, y=0, alignment=Qt.AlignLeft | Qt.AlignTop, bold=0, font=None, z=0,
                 htmlText=None, tooltip=None, show=1, vertical=False):
        QGraphicsTextItem.__init__(self, text, None, canvas)
        if font:
            self.setFont(font)
        if bold:
            font = self.font()
            font.setBold(bold)
            self.setFont(font)
        if htmlText:
            self.setHtml(htmlText)
        self.alignment = alignment
        self.vertical = vertical
        if vertical:
            self.setRotation(-90)
        self.setPos(x, y)
        # remember the logical (pre-alignment) anchor point
        self.x, self.y = x, y
        self.setZValue(z)
        if tooltip: self.setToolTip(tooltip)
        if show:
            self.show()
        else:
            self.hide()

    def setPos(self, x, y):
        """Place the item so that (x, y) is its alignment anchor rather
        than its top-left corner."""
        self.x, self.y = x, y
        rect = QGraphicsTextItem.boundingRect(self)
        if self.vertical:
            # after the -90 rotation width/height swap; the negative height
            # makes the alignment offsets below shift in the rotated frame
            h, w = rect.height(), rect.width()
            rect.setWidth(h)
            rect.setHeight(-w)
        if int(self.alignment & Qt.AlignRight):
            x -= rect.width()
        elif int(self.alignment & Qt.AlignHCenter):
            x -= rect.width() / 2.
        if int(self.alignment & Qt.AlignBottom):
            y -= rect.height()
        elif int(self.alignment & Qt.AlignVCenter):
            y -= rect.height() / 2.
        QGraphicsTextItem.setPos(self, x, y)
def OWCanvasRectangle(canvas, x=0, y=0, width=0, height=0, penColor=QColor(128, 128, 128), brushColor=None, penWidth=1,
                      z=0,
                      penStyle=Qt.SolidLine, pen=None, tooltip=None, show=1):
    """Create a QGraphicsRectItem on `canvas`, configure pen/brush/z/tooltip
    and visibility, and return it. An explicit `pen` overrides the
    penColor/penWidth/penStyle triple."""
    item = QGraphicsRectItem(x, y, width, height, None, canvas)
    if brushColor:
        item.setBrush(QBrush(brushColor))
    item.setPen(pen if pen else QPen(penColor, penWidth, penStyle))
    item.setZValue(z)
    if tooltip:
        item.setToolTip(tooltip)
    item.setVisible(bool(show))
    return item
def OWCanvasLine(canvas, x1=0, y1=0, x2=0, y2=0, penWidth=2, penColor=QColor(255, 255, 255, 128), pen=None, z=0,
                 tooltip=None, show=1):
    """Create a QGraphicsLineItem from (x1, y1) to (x2, y2) on `canvas`,
    configure it and return it. An explicit `pen` overrides
    penColor/penWidth."""
    line = QGraphicsLineItem(x1, y1, x2, y2, None, canvas)
    line.setPen(pen if pen is not None else QPen(penColor, penWidth))
    line.setZValue(z)
    if tooltip:
        line.setToolTip(tooltip)
    line.setVisible(bool(show))
    return line
def OWCanvasEllipse(canvas, x=0, y=0, width=0, height=0, penWidth=1, startAngle=0, angles=360, penColor=Qt.black,
                    brushColor=None, z=0, penStyle=Qt.SolidLine, pen=None, tooltip=None, show=1):
    """Create a QGraphicsEllipseItem (or arc) on `canvas` and return it.

    `penStyle` is accepted for API symmetry with OWCanvasRectangle but is
    not applied here (matching the historical behavior).
    """
    ellipse = QGraphicsEllipseItem(x, y, width, height, None, canvas)
    ellipse.setZValue(z)
    if brushColor is not None:
        ellipse.setBrush(QBrush(brushColor))
    ellipse.setPen(pen if pen is not None else QPen(penColor, penWidth))
    ellipse.setStartAngle(startAngle)
    ellipse.setSpanAngle(angles * 16)  # Qt span angles are in 1/16 of a degree
    if tooltip:
        ellipse.setToolTip(tooltip)
    ellipse.setVisible(bool(show))
    return ellipse
# test widget appearance
if __name__ == "__main__":
    # manual smoke test: show the widget with the bundled zoo dataset
    a = QApplication(sys.argv)
    ow = OWMosaicDisplay()
    ow.show()
    data = Table("zoo.tab")
    ow.setData(data)
    ow.handleNewSignals()
    a.exec_()
|
af1rst/bite-project | refs/heads/master | deps/mrtaskman/server/third_party/prodeagle/counter.py | 16 | #!/usr/bin/env python
#
# Copyright 2011 MiuMeet AG.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from third_party.prodeagle import counter_names
import logging
from third_party.prodeagle import config
def incr(name, delta=1, save_stats=config.SAVE_PRODEAGLE_STATS):
    """Increment the single counter `name` by `delta`.

    A zero/falsy delta is a no-op, so no memcache round-trip is made.
    """
    if not delta:
        return
    incrBatch({name: delta}, save_stats)
class Batch():
    """Accumulates counter increments locally so they can be flushed to
    memcache with a single incrBatch() call."""

    def __init__(self):
        # counter name -> accumulated delta
        self.pending = {}

    def incr(self, name, delta=1):
        """Add `delta` to the pending total for `name` (falsy delta: no-op)."""
        if not delta:
            return
        self.pending[name] = delta + self.pending.get(name, 0)

    def commit(self, save_stats=config.SAVE_PRODEAGLE_STATS):
        """Flush all pending increments in one batch, then reset."""
        if not self.pending:
            return
        incrBatch(self.pending, save_stats)
        self.pending = {}
def incrBatch(counters, save_stats=config.SAVE_PRODEAGLE_STATS):
    """Atomically add a dict of counter deltas to memcache for the current
    time slot, register any counter names not seen before, and optionally
    record ProdEagle's own bookkeeping stats.

    Never raises: any failure is logged and swallowed (metrics must not
    break the caller).
    """
    try:
        cnm = counter_names.getDefaultCounterNamesManager()
        slot = counter_names.getEpochRounded()
        # memcache keys are "<slot><name>" so each slot accumulates separately
        prefixed_counters = {}
        for name in counters:
            prefixed_counters[str(slot) + name] = counters[name]
        save_in_between_name = None
        if config.SAVE_IN_BETWEEN:
            save_in_between_name = ("save_in_between_%d" %
                                    counter_names.getEpochRounded(utc_datetime=None,
                                                                  slot_size=config.SAVE_IN_BETWEEN))
            prefixed_counters[save_in_between_name] = 1
        existing = memcache.offset_multi(prefixed_counters,
                                         namespace=cnm.namespace,
                                         initial_value=0)
        # a counter whose post-offset value equals our delta was just created
        new_counter_names = []
        for name in counters:
            if (counters[name] == existing[str(slot) + name]):
                new_counter_names += [name]
        (data_store_access, n_added_names) = cnm.addIfNew(new_counter_names)
        if config.SAVE_IN_BETWEEN and existing[save_in_between_name] == 1:
            # first increment of this in-between window: schedule one harvest
            # task; the fixed task name makes duplicates raise, which we ignore
            try:
                taskqueue.Task(url=config.PRODEAGLE_HARVEST_URL,
                               params={"save_in_between": "1"},
                               countdown=config.SAVE_IN_BETWEEN,
                               name="prodeagle-" + save_in_between_name).add()
            except Exception:
                pass
        if save_stats:
            # BUG FIX: this Batch was previously bound to the name `counters`,
            # shadowing the parameter; if anything below failed, the except
            # handler's `counters.keys()` then crashed on the Batch object.
            stats = Batch()
            if data_store_access:
                stats.incr("ProdEagle.Datastore.ReadAccess")
            if n_added_names:
                stats.incr("ProdEagle.NewNames", n_added_names)
                stats.incr("ProdEagle.Datastore.WriteAccess")
            if config.SAVE_IN_BETWEEN and existing[save_in_between_name] == 1:
                stats.incr("ProdEagle.SaveInBetween")
            stats.commit(save_stats=False)
    except Exception:
        logging.warning("Couldn't increase the following counters: %s"
                        % ", ".join(counters.keys()))
|
mith1979/ansible_automation | refs/heads/master | applied_python/applied_python/lib/python2.7/site-packages/pylint/test/functional/anomalous_unicode_escape_py3.py | 13 | # pylint:disable=W0105, W0511
"""Test for backslash escapes in byte vs unicode strings"""
# Would be valid in Unicode, but probably not what you want otherwise
BAD_UNICODE = b'\u0042' # [anomalous-unicode-escape-in-string]
BAD_LONG_UNICODE = b'\U00000042' # [anomalous-unicode-escape-in-string]
# +1:[anomalous-unicode-escape-in-string]
BAD_NAMED_UNICODE = b'\N{GREEK SMALL LETTER ALPHA}'
GOOD_UNICODE = u'\u0042'
GOOD_LONG_UNICODE = u'\U00000042'
GOOD_NAMED_UNICODE = u'\N{GREEK SMALL LETTER ALPHA}'
# Valid raw strings
RAW_BACKSLASHES = r'raw'
# In a comment you can have whatever you want: \ \\ \n \m
# even things that look like bad strings: "C:\Program Files"
|
sanyaade-teachings/oppia | refs/heads/master | main_cron.py | 30 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main package for URL routing and the index page."""
__author__ = 'Sean Lip'
import feconf
from core.controllers import cron
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
import main
import webapp2
# Register the URLs with the classes responsible for handling them.
urls = [
main.get_redirect_route(
r'/cron/mail/admin/job_status', cron.JobStatusMailerHandler,
'job_failure_mailer'),
main.get_redirect_route(
r'/cron/jobs/cleanup', cron.CronMapreduceCleanupHandler,
'job_cleanup_handler'),
]
app = transaction_services.toplevel_wrapper(
webapp2.WSGIApplication(urls, debug=feconf.DEBUG))
|
ianyh/heroku-buildpack-python-opencv | refs/heads/master | vendor/.heroku/lib/python2.7/distutils/version.py | 259 | #
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
an equivalent string -- ie. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* __cmp__ compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import string, re
from types import StringType
class Version:
    """Abstract base for the concrete version-number classes.

    Only the constructor and ``__repr__`` live here, because they are
    identical for every version-numbering scheme; everything else is
    supplied by subclasses.
    """

    def __init__ (self, vstring=None):
        # Delegate to the subclass-provided parser, but only when a
        # version string was actually supplied.
        if vstring:
            self.parse(vstring)

    def __repr__ (self):
        cls_name = self.__class__.__name__
        return "%s ('%s')" % (cls_name, str(self))

    # Interface every concrete subclass must implement (Version itself
    # stays abstract):
    #   __init__ (string)       - same as 'parse'; the string is optional
    #   parse (string)          - convert a string into the subclass's
    #                             internal representation
    #   __str__ (self)          - turn that representation back into a
    #                             string close to what 'parse' received
    #   __repr__ (self)         - Python code that recreates the instance
    #   __cmp__ (self, other)   - compare against another instance, or an
    #                             unparsed version string of the same class
class StrictVersion (Version):
    """Version numbering for anal retentives and software idealists.

    Implements the standard interface for version number classes as
    described above.  A version number consists of two or three
    dot-separated numeric components, with an optional "pre-release" tag
    on the end.  The pre-release tag consists of the letter 'a' or 'b'
    followed by a number.  If the numeric components of two version
    numbers are equal, then one with a pre-release tag will always
    be deemed earlier (lesser) than one without.

    The following are valid version numbers (shown in the order that
    would be obtained by sorting according to the supplied cmp function):

        0.4       0.4.0  (these two are equivalent)
        0.4.1
        0.5a1
        0.5b3
        0.5
        0.9.6
        1.0
        1.0.4a3
        1.0.4b1
        1.0.4

    The following are examples of invalid version numbers:

        1
        2.7.2.2
        1.3.a4
        1.3pl1
        1.3c4

    The rationale for this version numbering system will be explained
    in the distutils documentation.
    """

    # NOTE(review): this class is Python 2 only -- it relies on the
    # 'raise Exc, msg' statement form, string.atoi, string.join, the
    # StringType alias and the __cmp__/cmp comparison protocol, none of
    # which exist in Python 3.

    # Groups: 1=major, 2=minor, 4=patch (optional), 5=prerelease tag
    # ('aN'/'bN', optional), 6=the prerelease number.
    version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
                            re.VERBOSE)

    def parse (self, vstring):
        # Populates self.version (3-tuple of ints, patch defaulting to 0)
        # and self.prerelease (('a'|'b', int) or None).
        # Raises ValueError if vstring does not match version_re.
        match = self.version_re.match(vstring)
        if not match:
            raise ValueError, "invalid version number '%s'" % vstring

        (major, minor, patch, prerelease, prerelease_num) = \
            match.group(1, 2, 4, 5, 6)

        if patch:
            self.version = tuple(map(string.atoi, [major, minor, patch]))
        else:
            # No patch component given: normalize to x.y.0 so "0.4" and
            # "0.4.0" compare equal.
            self.version = tuple(map(string.atoi, [major, minor]) + [0])

        if prerelease:
            # prerelease[0] is the 'a'/'b' letter; the number follows.
            self.prerelease = (prerelease[0], string.atoi(prerelease_num))
        else:
            self.prerelease = None

    def __str__ (self):
        # Reconstruct the canonical string; a trailing ".0" patch level
        # is omitted, matching the normalization done in parse().
        if self.version[2] == 0:
            vstring = string.join(map(str, self.version[0:2]), '.')
        else:
            vstring = string.join(map(str, self.version), '.')

        if self.prerelease:
            vstring = vstring + self.prerelease[0] + str(self.prerelease[1])

        return vstring

    def __cmp__ (self, other):
        # Python 2 three-way comparison: negative/zero/positive.
        # A plain string operand is parsed into a StrictVersion first.
        if isinstance(other, StringType):
            other = StrictVersion(other)

        compare = cmp(self.version, other.version)
        if (compare == 0):              # have to compare prerelease

            # case 1: neither has prerelease; they're equal
            # case 2: self has prerelease, other doesn't; other is greater
            # case 3: self doesn't have prerelease, other does: self is greater
            # case 4: both have prerelease: must compare them!

            if (not self.prerelease and not other.prerelease):
                return 0
            elif (self.prerelease and not other.prerelease):
                return -1
            elif (not self.prerelease and other.prerelease):
                return 1
            elif (self.prerelease and other.prerelease):
                return cmp(self.prerelease, other.prerelease)

        else:                           # numeric versions don't match --
            return compare              # prerelease stuff doesn't matter

# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
    """Version numbering for anarchists and software realists.

    Implements the standard interface for version number classes as
    described above.  A version number consists of a series of numbers,
    separated by either periods or strings of letters.  When comparing
    version numbers, the numeric components will be compared
    numerically, and the alphabetic components lexically.  The following
    are all valid version numbers, in no particular order:

        1.5.1
        1.5.2b2
        161
        3.10a
        8.02
        3.4j
        1996.07.12
        3.2.pl0
        3.1.1.6
        2g6
        11g
        0.960923
        2.2beta29
        1.13++
        5.5.kw
        2.0b1pl0

    In fact, there is no such thing as an invalid version number under
    this scheme; the rules for comparison are simple and predictable,
    but may not always give the results you want (for some definition
    of "want").
    """

    # NOTE(review): Python 2 only -- depends on filter() returning a
    # list and on the __cmp__/cmp comparison protocol.

    # Splits a version string into runs of digits, runs of lowercase
    # letters, and '.' separators (the separators are discarded below).
    component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)

    def __init__ (self, vstring=None):
        if vstring:
            self.parse(vstring)

    def parse (self, vstring):
        # I've given up on thinking I can reconstruct the version string
        # from the parsed tuple -- so I just store the string here for
        # use by __str__
        self.vstring = vstring
        components = filter(lambda x: x and x != '.',
                            self.component_re.split(vstring))
        # Numeric components become ints so they compare numerically;
        # anything non-numeric stays a string and compares lexically.
        for i in range(len(components)):
            try:
                components[i] = int(components[i])
            except ValueError:
                pass

        self.version = components

    def __str__ (self):
        # The original input string, kept verbatim by parse().
        return self.vstring

    def __repr__ (self):
        return "LooseVersion ('%s')" % str(self)

    def __cmp__ (self, other):
        # A plain string operand is parsed into a LooseVersion first;
        # comparison is then a simple list comparison.
        if isinstance(other, StringType):
            other = LooseVersion(other)

        return cmp(self.version, other.version)

# end class LooseVersion
|
inares/edx-platform | refs/heads/inares_sass | lms/djangoapps/lti_provider/startup.py | 63 | """Code run at server start up to initialize the lti_provider app."""
# Import the tasks module to ensure that signal handlers are registered.
import lms.djangoapps.lti_provider.tasks # pylint: disable=unused-import
|
dstroppa/openstack-smartos-nova-grizzly | refs/heads/master | nova/api/openstack/compute/views/limits.py | 9 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.openstack.common import timeutils
class ViewBuilder(object):
    """OpenStack API base limits view builder."""

    def build(self, rate_limits, absolute_limits):
        """Assemble the top-level "limits" view.

        :param rate_limits: list of raw rate-limit dicts (keys "URI",
            "regex", "verb", "value", "remaining", "unit", "resetTime").
        :param absolute_limits: dict mapping internal quota names to
            values, e.g. {"ram": 512, "gigabytes": 1024}.
        :returns: {"limits": {"rate": [...], "absolute": {...}}}
        """
        rate_limits = self._build_rate_limits(rate_limits)
        absolute_limits = self._build_absolute_limits(absolute_limits)

        output = {
            "limits": {
                "rate": rate_limits,
                "absolute": absolute_limits,
            },
        }

        return output

    def _build_absolute_limits(self, absolute_limits):
        """Builder for absolute limits

        absolute_limits should be given as a dict of limits.
        For example: {"ram": 512, "gigabytes": 1024}.
        Quota names not listed in limit_names, and None values, are
        dropped from the view.
        """
        # Maps each internal quota name to the API-facing limit name(s)
        # it is published under (metadata_items feeds two names).
        limit_names = {
            "ram": ["maxTotalRAMSize"],
            "instances": ["maxTotalInstances"],
            "cores": ["maxTotalCores"],
            "key_pairs": ["maxTotalKeypairs"],
            "floating_ips": ["maxTotalFloatingIps"],
            "metadata_items": ["maxServerMeta", "maxImageMeta"],
            "injected_files": ["maxPersonality"],
            "injected_file_content_bytes": ["maxPersonalitySize"],
            "security_groups": ["maxSecurityGroups"],
            "security_group_rules": ["maxSecurityGroupRules"],
        }
        limits = {}
        # .items() behaves identically to the py2-only .iteritems() here
        # and keeps the code portable.
        for name, value in absolute_limits.items():
            if name in limit_names and value is not None:
                # Fix: use a distinct loop variable instead of rebinding
                # `name`, which the original inner loop shadowed.
                for alias in limit_names[name]:
                    limits[alias] = value
        return limits

    def _build_rate_limits(self, rate_limits):
        """Group raw rate limits by (URI, regex) into view entries."""
        limits = []
        for rate_limit in rate_limits:
            _rate_limit_key = None
            _rate_limit = self._build_rate_limit(rate_limit)

            # check for an existing entry with the same URI and regex
            for limit in limits:
                if (limit["uri"] == rate_limit["URI"] and
                        limit["regex"] == rate_limit["regex"]):
                    _rate_limit_key = limit
                    break

            # ensure we have an entry if we didn't find one
            if not _rate_limit_key:
                _rate_limit_key = {
                    "uri": rate_limit["URI"],
                    "regex": rate_limit["regex"],
                    "limit": [],
                }
                limits.append(_rate_limit_key)

            _rate_limit_key["limit"].append(_rate_limit)

        return limits

    def _build_rate_limit(self, rate_limit):
        """Convert one raw rate-limit dict into its view form."""
        # resetTime is a UTC unix timestamp; expose it as an ISO string.
        _get_utc = datetime.datetime.utcfromtimestamp
        next_avail = _get_utc(rate_limit["resetTime"])
        return {
            "verb": rate_limit["verb"],
            "value": rate_limit["value"],
            "remaining": int(rate_limit["remaining"]),
            "unit": rate_limit["unit"],
            "next-available": timeutils.isotime(at=next_avail),
        }
|
cobalys/django | refs/heads/master | tests/modeltests/lookup/tests.py | 57 | from __future__ import absolute_import, unicode_literals
from datetime import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.test import TestCase, skipUnlessDBFeature
from .models import Author, Article, Tag, Game, Season, Player
class LookupTests(TestCase):
def setUp(self):
    """Create the two authors, seven articles and three tags shared by all tests."""
    # Create a few Authors.
    self.au1 = Author(name='Author 1')
    self.au1.save()
    self.au2 = Author(name='Author 2')
    self.au2.save()
    # Create a couple of Articles.
    self.a1 = Article(headline='Article 1', pub_date=datetime(2005, 7, 26), author=self.au1)
    self.a1.save()
    self.a2 = Article(headline='Article 2', pub_date=datetime(2005, 7, 27), author=self.au1)
    self.a2.save()
    self.a3 = Article(headline='Article 3', pub_date=datetime(2005, 7, 27), author=self.au1)
    self.a3.save()
    self.a4 = Article(headline='Article 4', pub_date=datetime(2005, 7, 28), author=self.au1)
    self.a4.save()
    self.a5 = Article(headline='Article 5', pub_date=datetime(2005, 8, 1, 9, 0), author=self.au2)
    self.a5.save()
    self.a6 = Article(headline='Article 6', pub_date=datetime(2005, 8, 1, 8, 0), author=self.au2)
    self.a6.save()
    self.a7 = Article(headline='Article 7', pub_date=datetime(2005, 7, 27), author=self.au2)
    self.a7.save()
    # Create a few Tags.
    self.t1 = Tag(name='Tag 1')
    self.t1.save()
    self.t1.articles.add(self.a1, self.a2, self.a3)
    self.t2 = Tag(name='Tag 2')
    self.t2.save()
    self.t2.articles.add(self.a3, self.a4, self.a5)
    self.t3 = Tag(name='Tag 3')
    self.t3.save()
    self.t3.articles.add(self.a5, self.a6, self.a7)
    # NOTE(review): the 5, 6, 4, 2, 3, 7, 1 result order asserted throughout
    # this class presumably comes from Article's default Meta ordering
    # (apparently newest pub_date first) -- confirm against models.py.
def test_exists(self):
    """exists() reflects whether any rows currently match the queryset."""
    # We can use .exists() to check that there are some
    self.assertTrue(Article.objects.exists())
    for a in Article.objects.all():
        a.delete()
    # There should be none now!
    self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
    """An integer primary key can be matched against its string form."""
    # Integer value can be queried using string
    self.assertQuerysetEqual(Article.objects.filter(id__iexact=str(self.a1.id)),
                             ['<Article: Article 1>'])
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
    """Date fields accept string-prefix lookups on backends that support it."""
    # A date lookup can be performed using a string search
    self.assertQuerysetEqual(Article.objects.filter(pub_date__startswith='2005'),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
def test_iterator(self):
    """iterator() lazily yields instances and works on filtered querysets."""
    # Each QuerySet gets iterator(), which is a generator that "lazily"
    # returns results using database-level iteration.
    self.assertQuerysetEqual(Article.objects.iterator(),
        [
            'Article 5',
            'Article 6',
            'Article 4',
            'Article 2',
            'Article 3',
            'Article 7',
            'Article 1',
        ],
        transform=attrgetter('headline'))
    # iterator() can be used on any QuerySet.
    self.assertQuerysetEqual(
        Article.objects.filter(headline__endswith='4').iterator(),
        ['Article 4'],
        transform=attrgetter('headline'))
def test_count(self):
    """count() matches filters, respects slicing, and accepts string dates."""
    # count() returns the number of objects matching search criteria.
    self.assertEqual(Article.objects.count(), 7)
    self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
    self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)

    # count() should respect sliced query sets.
    articles = Article.objects.all()
    self.assertEqual(articles.count(), 7)
    self.assertEqual(articles[:4].count(), 4)
    self.assertEqual(articles[1:100].count(), 6)
    self.assertEqual(articles[10:100].count(), 0)

    # Date and date/time lookups can also be done with strings.
    self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
    """in_bulk() maps given IDs to instances and accepts any iterable of IDs."""
    # in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
    arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
    self.assertEqual(arts[self.a1.id], self.a1)
    self.assertEqual(arts[self.a2.id], self.a2)
    self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
    # Sets, frozensets, tuples and plain iterators all work as the ID source.
    self.assertEqual(Article.objects.in_bulk(set([self.a3.id])), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
    self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
    # Missing IDs and empty inputs produce an empty dict, not an error.
    self.assertEqual(Article.objects.in_bulk([1000]), {})
    self.assertEqual(Article.objects.in_bulk([]), {})
    self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
    self.assertEqual(Article.objects.in_bulk(iter([])), {})
    # Calling without an ID list, or with lookup kwargs, is a TypeError.
    self.assertRaises(TypeError, Article.objects.in_bulk)
    self.assertRaises(TypeError, Article.objects.in_bulk, headline__startswith='Blah')
def test_values(self):
    """values() returns dicts of the requested fields, supports extra(select),
    relation traversal, and falls back to all fields when none are named."""
    # values() returns a list of dictionaries instead of object instances --
    # and you can specify which fields you want to retrieve.
    identity = lambda x:x
    self.assertQuerysetEqual(Article.objects.values('headline'),
        [
            {'headline': 'Article 5'},
            {'headline': 'Article 6'},
            {'headline': 'Article 4'},
            {'headline': 'Article 2'},
            {'headline': 'Article 3'},
            {'headline': 'Article 7'},
            {'headline': 'Article 1'},
        ],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
        [{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
        transform=identity)
    self.assertQuerysetEqual(Article.objects.values('id', 'headline'),
        [
            {'id': self.a5.id, 'headline': 'Article 5'},
            {'id': self.a6.id, 'headline': 'Article 6'},
            {'id': self.a4.id, 'headline': 'Article 4'},
            {'id': self.a2.id, 'headline': 'Article 2'},
            {'id': self.a3.id, 'headline': 'Article 3'},
            {'id': self.a7.id, 'headline': 'Article 7'},
            {'id': self.a1.id, 'headline': 'Article 1'},
        ],
        transform=identity)
    # You can use values() with iterator() for memory savings,
    # because iterator() uses database-level iteration.
    self.assertQuerysetEqual(Article.objects.values('id', 'headline').iterator(),
        [
            {'headline': 'Article 5', 'id': self.a5.id},
            {'headline': 'Article 6', 'id': self.a6.id},
            {'headline': 'Article 4', 'id': self.a4.id},
            {'headline': 'Article 2', 'id': self.a2.id},
            {'headline': 'Article 3', 'id': self.a3.id},
            {'headline': 'Article 7', 'id': self.a7.id},
            {'headline': 'Article 1', 'id': self.a1.id},
        ],
        transform=identity)
    # The values() method works with "extra" fields specified in extra(select).
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
        [
            {'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
            {'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
            {'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
            {'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
            {'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
            {'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
            {'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
        ],
        transform=identity)
    # Several extra(select) aliases can be requested at once.
    data = {
        'id_plus_one': 'id+1',
        'id_plus_two': 'id+2',
        'id_plus_three': 'id+3',
        'id_plus_four': 'id+4',
        'id_plus_five': 'id+5',
        'id_plus_six': 'id+6',
        'id_plus_seven': 'id+7',
        'id_plus_eight': 'id+8',
    }
    self.assertQuerysetEqual(
        Article.objects.filter(id=self.a1.id).extra(select=data).values(*data.keys()),
        [{
            'id_plus_one': self.a1.id + 1,
            'id_plus_two': self.a1.id + 2,
            'id_plus_three': self.a1.id + 3,
            'id_plus_four': self.a1.id + 4,
            'id_plus_five': self.a1.id + 5,
            'id_plus_six': self.a1.id + 6,
            'id_plus_seven': self.a1.id + 7,
            'id_plus_eight': self.a1.id + 8,
        }], transform=identity)
    # You can specify fields from forward and reverse relations, just like filter().
    self.assertQuerysetEqual(
        Article.objects.values('headline', 'author__name'),
        [
            {'headline': self.a5.headline, 'author__name': self.au2.name},
            {'headline': self.a6.headline, 'author__name': self.au2.name},
            {'headline': self.a4.headline, 'author__name': self.au1.name},
            {'headline': self.a2.headline, 'author__name': self.au1.name},
            {'headline': self.a3.headline, 'author__name': self.au1.name},
            {'headline': self.a7.headline, 'author__name': self.au2.name},
            {'headline': self.a1.headline, 'author__name': self.au1.name},
        ], transform=identity)
    self.assertQuerysetEqual(
        Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
        [
            {'name': self.au1.name, 'article__headline': self.a1.headline},
            {'name': self.au1.name, 'article__headline': self.a2.headline},
            {'name': self.au1.name, 'article__headline': self.a3.headline},
            {'name': self.au1.name, 'article__headline': self.a4.headline},
            {'name': self.au2.name, 'article__headline': self.a5.headline},
            {'name': self.au2.name, 'article__headline': self.a6.headline},
            {'name': self.au2.name, 'article__headline': self.a7.headline},
        ], transform=identity)
    # Traversing a many-to-many yields one row per (author, article, tag) combination.
    self.assertQuerysetEqual(
        Author.objects.values('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
        [
            {'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
            {'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
            {'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
            {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
            {'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
            {'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
            {'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
        ], transform=identity)
    # However, an exception FieldDoesNotExist will be thrown if you specify
    # a non-existent field name in values() (a field that is neither in the
    # model nor in extra(select)).
    self.assertRaises(FieldError,
        Article.objects.extra(select={'id_plus_one': 'id + 1'}).values,
        'id', 'id_plus_two')
    # If you don't specify field names to values(), all are returned.
    self.assertQuerysetEqual(Article.objects.filter(id=self.a5.id).values(),
        [{
            'id': self.a5.id,
            'author_id': self.au2.id,
            'headline': 'Article 5',
            'pub_date': datetime(2005, 8, 1, 9, 0)
        }], transform=identity)
def test_values_list(self):
    """values_list() returns tuples in the requested field order; flat=True
    flattens single-field results and is invalid for multiple fields."""
    # values_list() is similar to values(), except that the results are
    # returned as a list of tuples, rather than a list of dictionaries.
    # Within each tuple, the order of the elements is the same as the order
    # of fields in the values_list() call.
    identity = lambda x:x
    self.assertQuerysetEqual(Article.objects.values_list('headline'),
        [
            ('Article 5',),
            ('Article 6',),
            ('Article 4',),
            ('Article 2',),
            ('Article 3',),
            ('Article 7',),
            ('Article 1',),
        ], transform=identity)
    self.assertQuerysetEqual(Article.objects.values_list('id').order_by('id'),
        [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.values_list('id', flat=True).order_by('id'),
        [self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
                       .order_by('id').values_list('id'),
        [(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
        transform=identity)
    # extra(select) aliases appear in the position they are requested in.
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
                       .order_by('id').values_list('id_plus_one', 'id'),
        [
            (self.a1.id+1, self.a1.id),
            (self.a2.id+1, self.a2.id),
            (self.a3.id+1, self.a3.id),
            (self.a4.id+1, self.a4.id),
            (self.a5.id+1, self.a5.id),
            (self.a6.id+1, self.a6.id),
            (self.a7.id+1, self.a7.id)
        ],
        transform=identity)
    self.assertQuerysetEqual(
        Article.objects.extra(select={'id_plus_one': 'id+1'})
                       .order_by('id').values_list('id', 'id_plus_one'),
        [
            (self.a1.id, self.a1.id+1),
            (self.a2.id, self.a2.id+1),
            (self.a3.id, self.a3.id+1),
            (self.a4.id, self.a4.id+1),
            (self.a5.id, self.a5.id+1),
            (self.a6.id, self.a6.id+1),
            (self.a7.id, self.a7.id+1)
        ],
        transform=identity)
    # Many-to-many traversal yields one tuple per combination, as with values().
    self.assertQuerysetEqual(
        Author.objects.values_list('name', 'article__headline', 'article__tag__name').order_by('name', 'article__headline', 'article__tag__name'),
        [
            (self.au1.name, self.a1.headline, self.t1.name),
            (self.au1.name, self.a2.headline, self.t1.name),
            (self.au1.name, self.a3.headline, self.t1.name),
            (self.au1.name, self.a3.headline, self.t2.name),
            (self.au1.name, self.a4.headline, self.t2.name),
            (self.au2.name, self.a5.headline, self.t2.name),
            (self.au2.name, self.a5.headline, self.t3.name),
            (self.au2.name, self.a6.headline, self.t3.name),
            (self.au2.name, self.a7.headline, self.t3.name),
        ], transform=identity)
    # flat=True with more than one field is rejected.
    self.assertRaises(TypeError, Article.objects.values_list, 'id', 'headline', flat=True)
def test_get_next_previous_by(self):
    """get_next_by_FOO/get_previous_by_FOO navigate by date, id breaking ties."""
    # Every DateField and DateTimeField creates get_next_by_FOO() and
    # get_previous_by_FOO() methods. In the case of identical date values,
    # these methods will use the ID as a fallback check. This guarantees
    # that no records are skipped or duplicated.
    self.assertEqual(repr(self.a1.get_next_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_next_by_pub_date()),
                     '<Article: Article 3>')
    # Extra lookup kwargs further restrict the candidates.
    self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a3.get_next_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a4.get_next_by_pub_date()),
                     '<Article: Article 6>')
    # a5 has the latest pub_date, so there is nothing after it.
    self.assertRaises(Article.DoesNotExist, self.a5.get_next_by_pub_date)
    self.assertEqual(repr(self.a6.get_next_by_pub_date()),
                     '<Article: Article 5>')
    self.assertEqual(repr(self.a7.get_next_by_pub_date()),
                     '<Article: Article 4>')

    self.assertEqual(repr(self.a7.get_previous_by_pub_date()),
                     '<Article: Article 3>')
    self.assertEqual(repr(self.a6.get_previous_by_pub_date()),
                     '<Article: Article 4>')
    self.assertEqual(repr(self.a5.get_previous_by_pub_date()),
                     '<Article: Article 6>')
    self.assertEqual(repr(self.a4.get_previous_by_pub_date()),
                     '<Article: Article 7>')
    self.assertEqual(repr(self.a3.get_previous_by_pub_date()),
                     '<Article: Article 2>')
    self.assertEqual(repr(self.a2.get_previous_by_pub_date()),
                     '<Article: Article 1>')
def test_escaping(self):
    """Underscore, percent and backslash in lookups are quoted automatically."""
    # Underscores, percent signs and backslashes have special meaning in the
    # underlying SQL code, but Django handles the quoting of them automatically.
    a8 = Article(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
    a8.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
        [
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    # A literal '_' in the pattern must not act as an SQL wildcard.
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article_'),
        ['<Article: Article_ with underscore>'])
    a9 = Article(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
    a9.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article'),
        [
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    # Likewise a literal '%' must not act as an SQL wildcard.
    self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article%'),
        ['<Article: Article% with percent sign>'])
    a10 = Article(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
    a10.save()
    self.assertQuerysetEqual(Article.objects.filter(headline__contains='\\'),
        ['<Article: Article with \ backslash>'])
def test_exclude(self):
    """exclude() is the complement of filter(), including for special chars."""
    a8 = Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
    a9 = Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
    a10 = Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))

    # exclude() is the opposite of filter() when doing lookups:
    self.assertQuerysetEqual(
        Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.exclude(headline__startswith="Article_"),
        [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
    self.assertQuerysetEqual(Article.objects.exclude(headline="Article 7"),
        [
            '<Article: Article with \\ backslash>',
            '<Article: Article% with percent sign>',
            '<Article: Article_ with underscore>',
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 1>',
        ])
def test_none(self):
    """none() yields an empty queryset that stays empty through chaining."""
    # none() returns an EmptyQuerySet that behaves like any other QuerySet object
    self.assertQuerysetEqual(Article.objects.none(), [])
    self.assertQuerysetEqual(
        Article.objects.none().filter(headline__startswith='Article'), [])
    self.assertQuerysetEqual(
        Article.objects.filter(headline__startswith='Article').none(), [])
    self.assertEqual(Article.objects.none().count(), 0)
    # update() on an empty queryset touches zero rows.
    self.assertEqual(
        Article.objects.none().update(headline="This should not take effect"), 0)
    self.assertQuerysetEqual(
        [article for article in Article.objects.none().iterator()],
        [])
def test_in(self):
    """__in with an empty list matches nothing; excluding it matches everything."""
    # using __in with an empty list should return an empty query set
    self.assertQuerysetEqual(Article.objects.filter(id__in=[]), [])
    self.assertQuerysetEqual(Article.objects.exclude(id__in=[]),
        [
            '<Article: Article 5>',
            '<Article: Article 6>',
            '<Article: Article 4>',
            '<Article: Article 2>',
            '<Article: Article 3>',
            '<Article: Article 7>',
            '<Article: Article 1>',
        ])
def test_error_messages(self):
    """Bad field names and bad lookup types raise FieldError with helpful text."""
    # Programming errors are pointed out with nice error messages
    try:
        Article.objects.filter(pub_date_year='2005').count()
        self.fail('FieldError not raised')
    except FieldError as ex:
        self.assertEqual(str(ex), "Cannot resolve keyword 'pub_date_year' "
                         "into field. Choices are: author, headline, id, pub_date, tag")
    try:
        Article.objects.filter(headline__starts='Article')
        self.fail('FieldError not raised')
    except FieldError as ex:
        self.assertEqual(str(ex), "Join on field 'headline' not permitted. "
                         "Did you misspell 'starts' for the lookup type?")
    def test_regex(self):
        """Exercise the __regex (case-sensitive) and __iregex (case-insensitive) lookups."""
        # Create some articles with a bit more interesting headlines for testing field lookups:
        for a in Article.objects.all():
            a.delete()
        now = datetime.now()
        a1 = Article(pub_date=now, headline='f')
        a1.save()
        a2 = Article(pub_date=now, headline='fo')
        a2.save()
        a3 = Article(pub_date=now, headline='foo')
        a3.save()
        a4 = Article(pub_date=now, headline='fooo')
        a4.save()
        a5 = Article(pub_date=now, headline='hey-Foo')
        a5.save()
        a6 = Article(pub_date=now, headline='bar')
        a6.save()
        a7 = Article(pub_date=now, headline='AbBa')
        a7.save()
        a8 = Article(pub_date=now, headline='baz')
        a8.save()
        a9 = Article(pub_date=now, headline='baxZ')
        a9.save()
        # zero-or-more
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo*'),
            ['<Article: f>', '<Article: fo>', '<Article: foo>', '<Article: fooo>'])
        # iregex additionally matches 'hey-Foo' because matching is case-insensitive.
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'fo*'),
            [
                '<Article: f>',
                '<Article: fo>',
                '<Article: foo>',
                '<Article: fooo>',
                '<Article: hey-Foo>',
            ])
        # one-or-more
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fo+'),
            ['<Article: fo>', '<Article: foo>', '<Article: fooo>'])
        # wildcard
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'fooo?'),
            ['<Article: foo>', '<Article: fooo>'])
        # leading anchor
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^b'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'^a'),
            ['<Article: AbBa>'])
        # trailing anchor
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'z$'),
            ['<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'z$'),
            ['<Article: baxZ>', '<Article: baz>'])
        # character sets
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba[rz]'),
            ['<Article: bar>', '<Article: baz>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'ba.[RxZ]'),
            ['<Article: baxZ>'])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'ba[RxZ]'),
            ['<Article: bar>', '<Article: baxZ>', '<Article: baz>'])
        # and more articles:
        a10 = Article(pub_date=now, headline='foobar')
        a10.save()
        a11 = Article(pub_date=now, headline='foobaz')
        a11.save()
        a12 = Article(pub_date=now, headline='ooF')
        a12.save()
        a13 = Article(pub_date=now, headline='foobarbaz')
        a13.save()
        a14 = Article(pub_date=now, headline='zoocarfaz')
        a14.save()
        a15 = Article(pub_date=now, headline='barfoobaz')
        a15.save()
        a16 = Article(pub_date=now, headline='bazbaRFOO')
        a16.save()
        # alternation
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'oo(f|b)'),
            [
                '<Article: barfoobaz>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
                '<Article: ooF>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'^foo(f|b)'),
            ['<Article: foobar>', '<Article: foobarbaz>', '<Article: foobaz>'])
        # greedy matching
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b.*az'),
            [
                '<Article: barfoobaz>',
                '<Article: baz>',
                '<Article: bazbaRFOO>',
                '<Article: foobarbaz>',
                '<Article: foobaz>',
            ])
        self.assertQuerysetEqual(Article.objects.filter(headline__iregex=r'b.*ar'),
            [
                '<Article: bar>',
                '<Article: barfoobaz>',
                '<Article: bazbaRFOO>',
                '<Article: foobar>',
                '<Article: foobarbaz>',
            ])
    @skipUnlessDBFeature('supports_regex_backreferencing')
    def test_regex_backreferencing(self):
        """Backreferences (\\1) in __regex patterns, where the backend supports them."""
        # grouping and backreferences
        now = datetime.now()
        a10 = Article(pub_date=now, headline='foobar')
        a10.save()
        a11 = Article(pub_date=now, headline='foobaz')
        a11.save()
        a12 = Article(pub_date=now, headline='ooF')
        a12.save()
        a13 = Article(pub_date=now, headline='foobarbaz')
        a13.save()
        a14 = Article(pub_date=now, headline='zoocarfaz')
        a14.save()
        a15 = Article(pub_date=now, headline='barfoobaz')
        a15.save()
        a16 = Article(pub_date=now, headline='bazbaRFOO')
        a16.save()
        # Matches headlines where the char captured after 'b' recurs after a later 'b'.
        self.assertQuerysetEqual(Article.objects.filter(headline__regex=r'b(.).*b\1'),
            ['<Article: barfoobaz>', '<Article: bazbaRFOO>', '<Article: foobarbaz>'])
    def test_nonfield_lookups(self):
        """
        Ensure that a lookup query containing non-fields raises the proper
        exception.
        """
        # 'blahblah' is neither a field nor a registered lookup type.
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah=99)
        with self.assertRaises(FieldError):
            Article.objects.filter(headline__blahblah__exact=99)
        with self.assertRaises(FieldError):
            Article.objects.filter(blahblah=99)
    def test_lookup_collision(self):
        """
        Ensure that genuine field names don't collide with built-in lookup
        types ('year', 'gt', 'range', 'in' etc.).
        Refs #11670.
        """
        # Here we're using 'gt' as a code number for the year, e.g. 111=>2009.
        # Throughout this test, 'season__gt' must resolve to the *field* named
        # 'gt', while 'season__gt__gt' uses the field and then the gt lookup.
        season_2009 = Season.objects.create(year=2009, gt=111)
        season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2010 = Season.objects.create(year=2010, gt=222)
        season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
        season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
        season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011 = Season.objects.create(year=2011, gt=333)
        season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
        season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
        hunter_pence = Player.objects.create(name="Hunter Pence")
        hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
        pudge = Player.objects.create(name="Ivan Rodriquez")
        pudge.games = Game.objects.filter(season__year=2009)
        pedro_feliz = Player.objects.create(name="Pedro Feliz")
        pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
        johnson = Player.objects.create(name="Johnson")
        johnson.games = Game.objects.filter(season__year__in=[2011])
        # Games in 2010
        self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
        self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
        # Games in 2011
        self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
        self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
        self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
        # Games played in 2010 and 2011
        self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
        self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
        self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
        # Players who played in 2009
        self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
        # Players who played in 2010
        self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
        self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
        # Players who played in 2011
        self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
        self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
|
quxiaolong1504/django | refs/heads/master | tests/ordering/models.py | 66 | """
Specifying ordering
Specify default ordering for a model using the ``ordering`` attribute, which
should be a list or tuple of field names. This tells Django how to order
``QuerySet`` results.
If a field name in ``ordering`` starts with a hyphen, that field will be
ordered in descending order. Otherwise, it'll be ordered in ascending order.
The special-case field name ``"?"`` specifies random order.
The ordering attribute is not required. If you leave it off, ordering will be
undefined -- not random, just undefined.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Author(models.Model):
    """Author with a default ordering of newest primary key first."""
    class Meta:
        ordering = ('-pk',)
@python_2_unicode_compatible
class Article(models.Model):
    """Article ordered by descending pub_date, then headline ascending."""
    # Both FKs are nullable so articles can exist without authors in tests.
    author = models.ForeignKey(Author, null=True)
    second_author = models.ForeignKey(Author, null=True)
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()
    class Meta:
        ordering = ('-pub_date', 'headline')
    def __str__(self):
        return self.headline
class OrderedByAuthorArticle(Article):
    """Proxy of Article that overrides the default ordering with author fields."""
    class Meta:
        proxy = True
        ordering = ('author', 'second_author')
class Reference(models.Model):
    """References an article; ordered by the related article (which has its own ordering)."""
    article = models.ForeignKey(OrderedByAuthorArticle)
    class Meta:
        ordering = ('article',)
|
iku000888/Wordprocessing-100-Knoks | refs/heads/master | prob002/python/prob002.py | 1 | import sys
# Problem 2 of the NLP 100 knocks: interleave the characters of two strings
# given as command-line arguments (e.g. "abc" + "xyz" -> "axbycz").
line1 = str(sys.argv[1])
line2 = str(sys.argv[2])

# zip() pairs characters positionally and stops at the shorter argument, so
# unequal-length inputs no longer raise IndexError as the old index loop did.
# "".join(...) builds the result in one pass instead of quadratic `+=`.
newline = "".join(a + b for a, b in zip(line1, line2))

# Parenthesized print works on both Python 2 and Python 3 (the original
# `print newline;` statement is a syntax error under Python 3).
print(newline)
|
michalkurka/h2o-3 | refs/heads/master | h2o-py/GLRM_performance_tests/pyunit_milsong_performance_profile.py | 6 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
# This test is just to take a large dataset, perform GLRM on it and figure
# out the performance time. This test should not be run on Jenkins. It
# simply takes too long
def glrm_subset():
    """Train GLRM repeatedly on the million-songs dataset and report timing.

    Runs `num_runs` GLRM fits with rotating seeds and prints per-run wall time
    (from the model's start/end timestamps), iteration counts, and final
    objective values. Intended for manual performance profiling only — not for
    CI (see module comment: it takes too long for Jenkins).
    """
    acs_orig = h2o.upload_file(path=pyunit_utils.locate("bigdata/laptop/milsongs/milsongs-cls-train.csv.gz"))
    # Fixed seed pool; runs cycle through it via `ind % len(seeds)` so results
    # are reproducible across invocations.
    seeds = [2297378124, 3849570216, 6733652048, 8915337442, 8344418400, 9416580152, 2598632624, 4977008454, 8273228579,
             8185554539, 3219125000, 2998879373, 7707012513, 5786923379, 5029788935, 935945790, 7092607078, 9305834745,
             6173975590, 5397294255]
    run_time_ms = []
    iterations = []
    objective = []
    num_runs = 10        # number of times to repeat experiments
    for ind in range(num_runs):
        acs_model = H2OGeneralizedLowRankEstimator(k = 10,
                                                   transform = 'STANDARDIZE',
                                                   loss = 'Quadratic',
                                                   multi_loss="Categorical",
                                                   model_id="clients_core_glrm",
                                                   regularization_x="L2",
                                                   regularization_y="L1",
                                                   gamma_x=0.2,
                                                   gamma_y=0.5,
                                                   init="SVD",
                                                   max_iterations = 1000,
                                                   seed=seeds[ind % len(seeds)])
        acs_model.train(x = acs_orig.names, training_frame= acs_orig, seed=seeds[ind % len(seeds)])
        # Wall time is derived from the backend-reported timestamps, not a
        # client-side clock, so it excludes client/network overhead.
        run_time_ms.append(acs_model._model_json['output']['end_time'] - acs_model._model_json['output']['start_time'])
        iterations.append(acs_model._model_json['output']['iterations'])
        objective.append(acs_model._model_json['output']['objective'])
    print("Run time in ms: {0}".format(run_time_ms))
    print("number of iterations: {0}".format(iterations))
    print("objective function value: {0}".format(objective))
    sys.stdout.flush()
# Run through the pyunit harness when executed as a script; when imported
# (e.g. by the test runner), execute the profiling function directly.
if __name__ == "__main__":
    pyunit_utils.standalone_test(glrm_subset)
else:
    glrm_subset()
|
pongem/python-bot-project | refs/heads/master | appengine/standard/botapp/lib/django/contrib/gis/gdal/geometries.py | 33 | """
The OGRGeometry is a wrapper for using the OGR Geometry class
(see http://www.gdal.org/classOGRGeometry.html). OGRGeometry
may be instantiated when reading geometries from OGR Data Sources
(e.g. SHP files), or when given OGC WKT (a string).
While the 'full' API is not present yet, the API is "pythonic" unlike
the traditional and "next-generation" OGR Python bindings. One major
advantage OGR Geometries have over their GEOS counterparts is support
for spatial reference systems and their transformation.
Example:
>>> from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, SpatialReference
>>> wkt1, wkt2 = 'POINT(-90 30)', 'POLYGON((0 0, 5 0, 5 5, 0 5)'
>>> pnt = OGRGeometry(wkt1)
>>> print(pnt)
POINT (-90 30)
>>> mpnt = OGRGeometry(OGRGeomType('MultiPoint'), SpatialReference('WGS84'))
>>> mpnt.add(wkt1)
>>> mpnt.add(wkt1)
>>> print(mpnt)
MULTIPOINT (-90 30,-90 30)
>>> print(mpnt.srs.name)
WGS 84
>>> print(mpnt.srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> mpnt.transform(SpatialReference('NAD27'))
>>> print(mpnt.proj)
+proj=longlat +ellps=clrk66 +datum=NAD27 +no_defs
>>> print(mpnt)
MULTIPOINT (-89.999930378602485 29.999797886557641,-89.999930378602485 29.999797886557641)
The OGRGeomType class is to make it easy to specify an OGR geometry type:
>>> from django.contrib.gis.gdal import OGRGeomType
>>> gt1 = OGRGeomType(3) # Using an integer for the type
>>> gt2 = OGRGeomType('Polygon') # Using a string
>>> gt3 = OGRGeomType('POLYGON') # It's case-insensitive
>>> print(gt1 == 3, gt1 == 'Polygon') # Equivalence works w/non-OGRGeomType objects
True True
"""
import sys
from binascii import a2b_hex, b2a_hex
from ctypes import byref, c_char_p, c_double, c_ubyte, c_void_p, string_at
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import geom as capi, srs as srs_api
from django.contrib.gis.gdal.srs import CoordTransform, SpatialReference
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.utils import six
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_G_* routines are relevant here.
class OGRGeometry(GDALBase):
    """Generally encapsulates an OGR geometry.

    Wraps an OGR geometry pointer (via ctypes) and accepts WKT/EWKT, HEX WKB,
    GeoJSON, a WKB memoryview, an OGRGeomType, or a raw OGR pointer as input.
    After construction, ``__class__`` is swapped to the matching subclass in
    ``GEO_CLASSES`` (Point, LineString, Polygon, ...).
    """
    def __init__(self, geom_input, srs=None):
        "Initializes Geometry on either WKT or an OGR pointer as input."
        str_instance = isinstance(geom_input, six.string_types)
        # If HEX, unpack input to a binary buffer.
        if str_instance and hex_regex.match(geom_input):
            geom_input = six.memoryview(a2b_hex(geom_input.upper().encode()))
            str_instance = False
        # Constructing the geometry,
        if str_instance:
            wkt_m = wkt_regex.match(geom_input)
            json_m = json_regex.match(geom_input)
            if wkt_m:
                if wkt_m.group('srid'):
                    # If there's EWKT, set the SRS w/value of the SRID.
                    srs = int(wkt_m.group('srid'))
                if wkt_m.group('type').upper() == 'LINEARRING':
                    # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.
                    # See http://trac.osgeo.org/gdal/ticket/1992.
                    g = capi.create_geom(OGRGeomType(wkt_m.group('type')).num)
                    capi.import_wkt(g, byref(c_char_p(wkt_m.group('wkt').encode())))
                else:
                    g = capi.from_wkt(byref(c_char_p(wkt_m.group('wkt').encode())), None, byref(c_void_p()))
            elif json_m:
                g = capi.from_json(geom_input.encode())
            else:
                # Seeing if the input is a valid short-hand string
                # (e.g., 'Point', 'POLYGON').
                OGRGeomType(geom_input)
                g = capi.create_geom(OGRGeomType(geom_input).num)
        elif isinstance(geom_input, six.memoryview):
            # WKB was passed in
            g = capi.from_wkb(bytes(geom_input), None, byref(c_void_p()), len(geom_input))
        elif isinstance(geom_input, OGRGeomType):
            # OGRGeomType was passed in, an empty geometry will be created.
            g = capi.create_geom(geom_input.num)
        elif isinstance(geom_input, self.ptr_type):
            # OGR pointer (c_void_p) was the input.
            g = geom_input
        else:
            raise GDALException('Invalid input type for OGR Geometry construction: %s' % type(geom_input))
        # Now checking the Geometry pointer before finishing initialization
        # by setting the pointer for the object.
        if not g:
            raise GDALException('Cannot create OGR Geometry from input: %s' % str(geom_input))
        self.ptr = g
        # Assigning the SpatialReference object to the geometry, if valid.
        if srs:
            self.srs = srs
        # Setting the class depending upon the OGR Geometry Type
        self.__class__ = GEO_CLASSES[self.geom_type.num]
    def __del__(self):
        "Deletes this Geometry."
        try:
            capi.destroy_geom(self._ptr)
        except (AttributeError, TypeError):
            pass # Some part might already have been garbage collected
    # Pickle routines
    def __getstate__(self):
        # Serialize as (WKB bytes, SRS WKT-or-None); the raw C pointer is
        # process-local and cannot be pickled.
        srs = self.srs
        if srs:
            srs = srs.wkt
        else:
            srs = None
        return bytes(self.wkb), srs
    def __setstate__(self, state):
        wkb, srs = state
        ptr = capi.from_wkb(wkb, None, byref(c_void_p()), len(wkb))
        if not ptr:
            raise GDALException('Invalid OGRGeometry loaded from pickled state.')
        self.ptr = ptr
        self.srs = srs
    @classmethod
    def from_bbox(cls, bbox):
        "Constructs a Polygon from a bounding box (4-tuple)."
        x0, y0, x1, y1 = bbox
        return OGRGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (
            x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
    # ### Geometry set-like operations ###
    # g = g1 | g2
    def __or__(self, other):
        "Returns the union of the two geometries."
        return self.union(other)
    # g = g1 & g2
    def __and__(self, other):
        "Returns the intersection of this Geometry and the other."
        return self.intersection(other)
    # g = g1 - g2
    def __sub__(self, other):
        "Return the difference of this Geometry and the other."
        return self.difference(other)
    # g = g1 ^ g2
    def __xor__(self, other):
        "Return the symmetric difference of this Geometry and the other."
        return self.sym_difference(other)
    # NOTE(review): __eq__ is defined without __hash__, which makes instances
    # unhashable on Python 3 — confirm this is intended before relying on
    # geometries as dict keys or set members.
    def __eq__(self, other):
        "Is this Geometry equal to the other?"
        if isinstance(other, OGRGeometry):
            return self.equals(other)
        else:
            return False
    def __ne__(self, other):
        "Tests for inequality."
        return not (self == other)
    def __str__(self):
        "WKT is used for the string representation."
        return self.wkt
    # #### Geometry Properties ####
    @property
    def dimension(self):
        "Returns 0 for points, 1 for lines, and 2 for surfaces."
        return capi.get_dims(self.ptr)
    def _get_coord_dim(self):
        "Returns the coordinate dimension of the Geometry."
        return capi.get_coord_dim(self.ptr)
    def _set_coord_dim(self, dim):
        "Sets the coordinate dimension of this Geometry."
        if dim not in (2, 3):
            raise ValueError('Geometry dimension must be either 2 or 3')
        capi.set_coord_dim(self.ptr, dim)
    coord_dim = property(_get_coord_dim, _set_coord_dim)
    @property
    def geom_count(self):
        "The number of elements in this Geometry."
        return capi.get_geom_count(self.ptr)
    @property
    def point_count(self):
        "Returns the number of Points in this Geometry."
        return capi.get_point_count(self.ptr)
    @property
    def num_points(self):
        "Alias for `point_count` (same name method in GEOS API.)"
        return self.point_count
    @property
    def num_coords(self):
        "Alias for `point_count`."
        return self.point_count
    @property
    def geom_type(self):
        "Returns the Type for this Geometry."
        return OGRGeomType(capi.get_geom_type(self.ptr))
    @property
    def geom_name(self):
        "Returns the Name of this Geometry."
        return capi.get_geom_name(self.ptr)
    @property
    def area(self):
        "Returns the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise."
        return capi.get_area(self.ptr)
    @property
    def envelope(self):
        "Returns the envelope for this Geometry."
        # TODO: Fix Envelope() for Point geometries.
        return Envelope(capi.get_envelope(self.ptr, byref(OGREnvelope())))
    @property
    def extent(self):
        "Returns the envelope as a 4-tuple, instead of as an Envelope object."
        return self.envelope.tuple
    # #### SpatialReference-related Properties ####
    # The SRS property
    def _get_srs(self):
        "Returns the Spatial Reference for this Geometry."
        try:
            srs_ptr = capi.get_geom_srs(self.ptr)
            # Clone so the returned SpatialReference owns its own OGR handle.
            return SpatialReference(srs_api.clone_srs(srs_ptr))
        except SRSException:
            return None
    def _set_srs(self, srs):
        "Sets the SpatialReference for this geometry."
        # Do not have to clone the `SpatialReference` object pointer because
        # when it is assigned to this `OGRGeometry` it's internal OGR
        # reference count is incremented, and will likewise be released
        # (decremented) when this geometry's destructor is called.
        if isinstance(srs, SpatialReference):
            srs_ptr = srs.ptr
        elif isinstance(srs, six.integer_types + six.string_types):
            sr = SpatialReference(srs)
            srs_ptr = sr.ptr
        elif srs is None:
            srs_ptr = None
        else:
            raise TypeError('Cannot assign spatial reference with object of type: %s' % type(srs))
        capi.assign_srs(self.ptr, srs_ptr)
    srs = property(_get_srs, _set_srs)
    # The SRID property
    def _get_srid(self):
        srs = self.srs
        if srs:
            return srs.srid
        return None
    def _set_srid(self, srid):
        if isinstance(srid, six.integer_types) or srid is None:
            self.srs = srid
        else:
            raise TypeError('SRID must be set with an integer.')
    srid = property(_get_srid, _set_srid)
    # #### Output Methods ####
    @property
    def geos(self):
        "Returns a GEOSGeometry object from this OGRGeometry."
        # Imported lazily to avoid a circular import with django.contrib.gis.geos.
        from django.contrib.gis.geos import GEOSGeometry
        return GEOSGeometry(self.wkb, self.srid)
    @property
    def gml(self):
        "Returns the GML representation of the Geometry."
        return capi.to_gml(self.ptr)
    @property
    def hex(self):
        "Returns the hexadecimal representation of the WKB (a string)."
        return b2a_hex(self.wkb).upper()
    @property
    def json(self):
        """
        Returns the GeoJSON representation of this Geometry.
        """
        return capi.to_json(self.ptr)
    geojson = json
    @property
    def kml(self):
        "Returns the KML representation of the Geometry."
        return capi.to_kml(self.ptr, None)
    @property
    def wkb_size(self):
        "Returns the size of the WKB buffer."
        return capi.get_wkbsize(self.ptr)
    @property
    def wkb(self):
        "Returns the WKB representation of the Geometry."
        # Emit WKB in the host machine's native byte order.
        if sys.byteorder == 'little':
            byteorder = 1 # wkbNDR (from ogr_core.h)
        else:
            byteorder = 0 # wkbXDR
        sz = self.wkb_size
        # Creating the unsigned character buffer, and passing it in by reference.
        buf = (c_ubyte * sz)()
        capi.to_wkb(self.ptr, byteorder, byref(buf))
        # Returning a buffer of the string at the pointer.
        return six.memoryview(string_at(buf, sz))
    @property
    def wkt(self):
        "Returns the WKT representation of the Geometry."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def ewkt(self):
        "Returns the EWKT representation of the Geometry."
        srs = self.srs
        if srs and srs.srid:
            return 'SRID=%s;%s' % (srs.srid, self.wkt)
        else:
            return self.wkt
    # #### Geometry Methods ####
    def clone(self):
        "Clones this OGR Geometry."
        return OGRGeometry(capi.clone_geom(self.ptr), self.srs)
    def close_rings(self):
        """
        If there are any rings within this geometry that have not been
        closed, this routine will do so by adding the starting point at the
        end.
        """
        # Closing the open rings.
        capi.geom_close_rings(self.ptr)
    def transform(self, coord_trans, clone=False):
        """
        Transforms this geometry to a different spatial reference system.
        May take a CoordTransform object, a SpatialReference object, string
        WKT or PROJ.4, and/or an integer SRID. By default nothing is returned
        and the geometry is transformed in-place. However, if the `clone`
        keyword is set, then a transformed clone of this geometry will be
        returned.
        """
        if clone:
            klone = self.clone()
            klone.transform(coord_trans)
            return klone
        # Depending on the input type, use the appropriate OGR routine
        # to perform the transformation.
        if isinstance(coord_trans, CoordTransform):
            capi.geom_transform(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, SpatialReference):
            capi.geom_transform_to(self.ptr, coord_trans.ptr)
        elif isinstance(coord_trans, six.integer_types + six.string_types):
            sr = SpatialReference(coord_trans)
            capi.geom_transform_to(self.ptr, sr.ptr)
        else:
            raise TypeError('Transform only accepts CoordTransform, '
                            'SpatialReference, string, and integer objects.')
    # #### Topology Methods ####
    def _topology(self, func, other):
        """A generalized function for topology operations, takes a GDAL function and
        the other geometry to perform the operation on."""
        if not isinstance(other, OGRGeometry):
            raise TypeError('Must use another OGRGeometry object for topology operations!')
        # Returning the output of the given function with the other geometry's
        # pointer.
        return func(self.ptr, other.ptr)
    def intersects(self, other):
        "Returns True if this geometry intersects with the other."
        return self._topology(capi.ogr_intersects, other)
    def equals(self, other):
        "Returns True if this geometry is equivalent to the other."
        return self._topology(capi.ogr_equals, other)
    def disjoint(self, other):
        "Returns True if this geometry and the other are spatially disjoint."
        return self._topology(capi.ogr_disjoint, other)
    def touches(self, other):
        "Returns True if this geometry touches the other."
        return self._topology(capi.ogr_touches, other)
    def crosses(self, other):
        "Returns True if this geometry crosses the other."
        return self._topology(capi.ogr_crosses, other)
    def within(self, other):
        "Returns True if this geometry is within the other."
        return self._topology(capi.ogr_within, other)
    def contains(self, other):
        "Returns True if this geometry contains the other."
        return self._topology(capi.ogr_contains, other)
    def overlaps(self, other):
        "Returns True if this geometry overlaps the other."
        return self._topology(capi.ogr_overlaps, other)
    # #### Geometry-generation Methods ####
    def _geomgen(self, gen_func, other=None):
        "A helper routine for the OGR routines that generate geometries."
        if isinstance(other, OGRGeometry):
            return OGRGeometry(gen_func(self.ptr, other.ptr), self.srs)
        else:
            return OGRGeometry(gen_func(self.ptr), self.srs)
    @property
    def boundary(self):
        "Returns the boundary of this geometry."
        return self._geomgen(capi.get_boundary)
    @property
    def convex_hull(self):
        """
        Returns the smallest convex Polygon that contains all the points in
        this Geometry.
        """
        return self._geomgen(capi.geom_convex_hull)
    def difference(self, other):
        """
        Returns a new geometry consisting of the region which is the difference
        of this geometry and the other.
        """
        return self._geomgen(capi.geom_diff, other)
    def intersection(self, other):
        """
        Returns a new geometry consisting of the region of intersection of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_intersection, other)
    def sym_difference(self, other):
        """
        Returns a new geometry which is the symmetric difference of this
        geometry and the other.
        """
        return self._geomgen(capi.geom_sym_diff, other)
    def union(self, other):
        """
        Returns a new geometry consisting of the region which is the union of
        this geometry and the other.
        """
        return self._geomgen(capi.geom_union, other)
# The subclasses for OGR Geometry.
class Point(OGRGeometry):
    """The OGR Point geometry."""
    @property
    def x(self):
        "Returns the X coordinate for this Point."
        return capi.getx(self.ptr, 0)
    @property
    def y(self):
        "Returns the Y coordinate for this Point."
        return capi.gety(self.ptr, 0)
    @property
    def z(self):
        "Returns the Z coordinate for this Point, or None if the point is 2D."
        if self.coord_dim == 3:
            return capi.getz(self.ptr, 0)
    @property
    def tuple(self):
        "Returns the tuple of this point: (x, y) for 2D, (x, y, z) for 3D."
        if self.coord_dim == 2:
            return (self.x, self.y)
        elif self.coord_dim == 3:
            return (self.x, self.y, self.z)
    coords = tuple
class LineString(OGRGeometry):
    """The OGR LineString geometry — a sequence of coordinate tuples."""
    def __getitem__(self, index):
        "Returns the Point at the given index."
        if index >= 0 and index < self.point_count:
            x, y, z = c_double(), c_double(), c_double()
            capi.get_point(self.ptr, index, byref(x), byref(y), byref(z))
            # Return only as many coordinates as the geometry's dimension.
            dim = self.coord_dim
            if dim == 1:
                return (x.value,)
            elif dim == 2:
                return (x.value, y.value)
            elif dim == 3:
                return (x.value, y.value, z.value)
        else:
            # NOTE: negative indices are not supported.
            raise OGRIndexError('index out of range: %s' % str(index))
    def __iter__(self):
        "Iterates over each point in the LineString."
        for i in range(self.point_count):
            yield self[i]
    def __len__(self):
        "The length returns the number of points in the LineString."
        return self.point_count
    @property
    def tuple(self):
        "Returns the tuple representation of this LineString."
        return tuple(self[i] for i in range(len(self)))
    coords = tuple
    def _listarr(self, func):
        """
        Internal routine that returns a sequence (list) corresponding with
        the given function.
        """
        return [func(self.ptr, i) for i in range(len(self))]
    @property
    def x(self):
        "Returns the X coordinates in a list."
        return self._listarr(capi.getx)
    @property
    def y(self):
        "Returns the Y coordinates in a list."
        return self._listarr(capi.gety)
    @property
    def z(self):
        "Returns the Z coordinates in a list, or None for 2D lines."
        if self.coord_dim == 3:
            return self._listarr(capi.getz)
# LinearRings are used in Polygons.
class LinearRing(LineString):
    """A closed LineString, used as the rings of a Polygon."""
    pass
class Polygon(OGRGeometry):
    """The OGR Polygon geometry — indexable as a sequence of LinearRings."""
    def __len__(self):
        "The number of interior rings in this Polygon."
        return self.geom_count
    def __iter__(self):
        "Iterates through each ring in the Polygon."
        for i in range(self.geom_count):
            yield self[i]
    def __getitem__(self, index):
        "Gets the ring at the specified index (0 is the exterior shell)."
        if index < 0 or index >= self.geom_count:
            raise OGRIndexError('index out of range: %s' % index)
        else:
            # Clone so the returned ring owns its own OGR handle.
            return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
    # Polygon Properties
    @property
    def shell(self):
        "Returns the shell of this Polygon."
        return self[0] # First ring is the shell
    exterior_ring = shell
    @property
    def tuple(self):
        "Returns a tuple of LinearRing coordinate tuples."
        return tuple(self[i].tuple for i in range(self.geom_count))
    coords = tuple
    @property
    def point_count(self):
        "The number of Points in this Polygon."
        # Summing up the number of points in each ring of the Polygon.
        return sum(self[i].point_count for i in range(self.geom_count))
    @property
    def centroid(self):
        "Returns the centroid (a Point) of this Polygon."
        # The centroid is a Point, create a geometry for this.
        p = OGRGeometry(OGRGeomType('Point'))
        capi.get_centroid(self.ptr, p.ptr)
        return p
# Geometry Collection base class.
class GeometryCollection(OGRGeometry):
    "The Geometry Collection class."
    def __getitem__(self, index):
        "Gets the Geometry at the specified index."
        if index < 0 or index >= self.geom_count:
            raise OGRIndexError('index out of range: %s' % index)
        else:
            # Clone so the returned member owns its own OGR handle.
            return OGRGeometry(capi.clone_geom(capi.get_geom_ref(self.ptr, index)), self.srs)
    def __iter__(self):
        "Iterates over each Geometry."
        for i in range(self.geom_count):
            yield self[i]
    def __len__(self):
        "The number of geometries in this Geometry Collection."
        return self.geom_count
    def add(self, geom):
        "Add the geometry (an OGRGeometry or a geometry string) to this Geometry Collection."
        if isinstance(geom, OGRGeometry):
            if isinstance(geom, self.__class__):
                # Adding a collection of the same type merges its members
                # individually, rather than nesting collections.
                for g in geom:
                    capi.add_geom(self.ptr, g.ptr)
            else:
                capi.add_geom(self.ptr, geom.ptr)
        elif isinstance(geom, six.string_types):
            tmp = OGRGeometry(geom)
            capi.add_geom(self.ptr, tmp.ptr)
        else:
            raise GDALException('Must add an OGRGeometry.')
    @property
    def point_count(self):
        "The number of Points in this Geometry Collection."
        # Summing up the number of points in each geometry in this collection
        return sum(self[i].point_count for i in range(self.geom_count))
    @property
    def tuple(self):
        "Returns a tuple representation of this Geometry Collection."
        return tuple(self[i].tuple for i in range(self.geom_count))
    coords = tuple
# Multiple Geometry types.
class MultiPoint(GeometryCollection):
    """A GeometryCollection restricted to Point members."""
    pass
class MultiLineString(GeometryCollection):
    """A GeometryCollection restricted to LineString members."""
    pass
class MultiPolygon(GeometryCollection):
    """A GeometryCollection restricted to Polygon members."""
    pass
# Class mapping dictionary (using the OGRwkbGeometryType as the key)
# Maps OGRwkbGeometryType codes to the wrapper classes above; __init__ uses
# this table to swap a new instance's __class__ to the right subclass.
GEO_CLASSES = {1: Point,
               2: LineString,
               3: Polygon,
               4: MultiPoint,
               5: MultiLineString,
               6: MultiPolygon,
               7: GeometryCollection,
               101: LinearRing,
               # The `wkb25bit` offset marks the 2.5D (Z-coordinate) variants;
               # they map to the same Python classes as their 2D counterparts.
               1 + OGRGeomType.wkb25bit: Point,
               2 + OGRGeomType.wkb25bit: LineString,
               3 + OGRGeomType.wkb25bit: Polygon,
               4 + OGRGeomType.wkb25bit: MultiPoint,
               5 + OGRGeomType.wkb25bit: MultiLineString,
               6 + OGRGeomType.wkb25bit: MultiPolygon,
               7 + OGRGeomType.wkb25bit: GeometryCollection,
               }
|
pol51/pjsip-winphone | refs/heads/master | tests/pjsua/scripts-recvfrom/230_reg_bad_fail_stale_true.py | 42 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
# In this test we simulate broken server, where it always sends
# stale=true with all 401 responses. We should expect pjsip to
# retry the authentication until PJSIP_MAX_STALE_COUNT is
# exceeded. When pjsip retries the authentication, it should
# use the new nonce from server
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
        "--realm=python --user=username --password=password"
# Step 1: initial REGISTER carries no credentials; challenge with nonce "1".
req1 = sip.RecvfromTransaction("Initial request", 401,
                               include=["REGISTER sip"],
                               exclude=["Authorization"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
                               )
# Step 2: retry must echo nonce "1" exactly once; challenge again with
# nonce "2" and stale=true to force another retry.
req2 = sip.RecvfromTransaction("First retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"1\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
                               )
# Step 3: same pattern with nonce "2" -> challenge with nonce "3".
req3 = sip.RecvfromTransaction("Second retry retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"2\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"]
                               )
# Step 4: after the third stale retry, pjsip must give up with
# PJSIP_EAUTHSTALECOUNT instead of retrying forever.
req4 = sip.RecvfromTransaction("Third retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"3\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"],
                               expect="PJSIP_EAUTHSTALECOUNT"
                               )
recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ",
                               pjsua, [req1, req2, req3, req4])
|
MoRgUiJu/morguiju.repo | refs/heads/master | plugin.video.adryanlist/pyaesnew/__init__.py | 68 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This is a pure-Python implementation of the AES algorithm and AES common
# modes of operation.
# See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
# See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
# Supported key sizes:
# 128-bit
# 192-bit
# 256-bit
# Supported modes of operation:
# ECB - Electronic Codebook
# CBC - Cipher-Block Chaining
# CFB - Cipher Feedback
# OFB - Output Feedback
# CTR - Counter
# See the README.md for API details and general information.
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings:
# https://www.dlitz.net/software/pycrypto/
# Package version as [major, minor, patch].
VERSION = [1, 3, 0]
from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter
from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter
from .blockfeeder import PADDING_NONE, PADDING_DEFAULT
|
aspose-pdf/Aspose.Pdf-for-Java | refs/heads/master | Plugins/Aspose-Pdf-Java-for-Jython/asposepdf/WorkingWithPages/InsertEmptyPageAtEndOfFile.py | 1 | from asposepdf import Settings
from com.aspose.pdf import Document
from java.util import Date
class InsertEmptyPageAtEndOfFile:
    """Aspose.Pdf-for-Java (via Jython) sample: append an empty page at
    the end of an existing PDF and save the result."""
    def __init__(self):
        # Directory holding the sample's input/output PDFs.
        dataDir = Settings.dataDir + 'WorkingWithPages/InsertEmptyPageAtEndOfFile/'

        # Open the target document
        pdf = Document(dataDir + 'input1.pdf')

        # Insert an empty page at the end of the PDF (add() with no
        # arguments appends).
        pdf.getPages().add()

        # Save the concatenated output file (the target document)
        pdf.save(dataDir + "output.pdf")

        print "Empty page added successfully!"

if __name__ == '__main__':
    InsertEmptyPageAtEndOfFile()
delanoister-Andro-ID/GT-I9300-ICS-3.0.y | refs/heads/master | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage message; previously named the wrong script
# ("syscall-counts-by-pid.py") -- this file is failed-syscalls-by-pid.py.
usage = "perf script -s failed-syscalls-by-pid.py [comm|pid]\n"

# Optional filter: restrict the report to one comm or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    # Only a non-numeric argument means "filter by comm"; the previous
    # bare except also swallowed unrelated errors (e.g. KeyboardInterrupt).
    except ValueError:
        for_comm = sys.argv[1]

# Nested counters: syscalls[comm][pid][syscall_id][errno] -> count
syscalls = autodict()
def trace_begin():
    # Called by perf once when the script starts.
    print "Press control+C to stop and show the summary"

def trace_end():
    # Called by perf when tracing stops; emit the accumulated totals.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, ret):
	# perf callback for the raw_syscalls:sys_exit tracepoint.
	# Skip events that do not match the requested comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return

	# Negative return values are failed syscalls: bump the counter for
	# this comm/pid/syscall-id/errno combination.
	if ret < 0:
		try:
			syscalls[common_comm][common_pid][id][ret] += 1
		except TypeError:
			# autodict leaf did not exist yet -- start the count at 1.
			syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
	# Render the per-comm/per-pid table of failed-syscall counts that
	# raw_syscalls__sys_exit accumulated in the global `syscalls` dict.
	if for_comm is not None:
		print "\nsyscall errors for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall errors:\n\n",

	# Table header.
	print "%-30s %10s\n" % ("comm [pid]", "count"),
	print "%-30s %10s\n" % ("------------------------------", \
	    "----------"),

	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			for id in id_keys:
				print " syscall: %-16s\n" % syscall_name(id),
				ret_keys = syscalls[comm][pid][id].keys()
				# Sort the per-errno counts in descending order.
				for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
					print " err = %-20s %10d\n" % (strerror(ret), val),
le9i0nx/ansible | refs/heads/devel | lib/ansible/modules/cloud/cloudstack/cs_project.py | 49 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity/support metadata consumed by Ansible's documentation
# and deprecation tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_project
short_description: Manages projects on Apache CloudStack based clouds.
description:
- Create, update, suspend, activate and remove projects.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the project.
required: true
display_text:
description:
- Display text of the project.
- If not specified, C(name) will be used as C(display_text).
required: false
default: null
state:
description:
- State of the project.
required: false
default: 'present'
choices: [ 'present', 'absent', 'active', 'suspended' ]
domain:
description:
- Domain the project is related to.
required: false
default: null
account:
description:
- Account the project is related to.
required: false
default: null
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
version_added: "2.2"
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create a project
- local_action:
module: cs_project
name: web
tags:
- { key: admin, value: john }
- { key: foo, value: bar }
# Rename a project
- local_action:
module: cs_project
name: web
display_text: my web project
# Suspend an existing project
- local_action:
module: cs_project
name: web
state: suspended
# Activate an existing project
- local_action:
module: cs_project
name: web
state: active
# Remove a project
- local_action:
module: cs_project
name: web
state: absent
'''
RETURN = '''
---
id:
description: UUID of the project.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the project.
returned: success
type: string
sample: web project
display_text:
description: Display text of the project.
returned: success
type: string
sample: web project
state:
description: State of the project.
returned: success
type: string
sample: Active
domain:
description: Domain the project is related to.
returned: success
type: string
sample: example domain
account:
description: Account the project is related to.
returned: success
type: string
sample: example account
tags:
description: List of resource tags associated with the project.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackProject(AnsibleCloudStack):
    """Manage CloudStack projects: lookup, create, update, activate,
    suspend and delete via the CloudStack API."""

    def get_project(self):
        # Lazily look up (and cache on self.project) the project matching
        # the `name` module param within the configured account/domain.
        # Matches case-insensitively on name, or exactly on the id.
        if not self.project:
            project = self.module.params.get('name')

            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id')
            }
            projects = self.query_api('listProjects', **args)
            if projects:
                for p in projects['project']:
                    if project.lower() in [p['name'].lower(), p['id']]:
                        self.project = p
                        break
        return self.project

    def present_project(self):
        # Ensure the project exists (create or update), then sync its
        # resource tags and refresh the cached project.
        project = self.get_project()
        if not project:
            project = self.create_project(project)
        else:
            project = self.update_project(project)
        if project:
            project = self.ensure_tags(resource=project, resource_type='project')
            # refresh resource
            self.project = project
        return project

    def update_project(self, project):
        # Update the display text when it differs; honors check mode and
        # optionally polls the async job to completion.
        args = {
            'id': project['id'],
            'displaytext': self.get_or_fallback('display_text', 'name')
        }
        if self.has_changed(args, project):
            self.result['changed'] = True
            if not self.module.check_mode:
                project = self.query_api('updateProject', **args)

                poll_async = self.module.params.get('poll_async')
                if project and poll_async:
                    project = self.poll_job(project, 'project')
        return project

    def create_project(self, project):
        # Create a new project from the module params.
        # NOTE(review): the `project` argument is always None when called
        # (from present_project) and is unused; kept for signature
        # stability.
        self.result['changed'] = True

        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'account': self.get_account('name'),
            'domainid': self.get_domain('id')
        }
        if not self.module.check_mode:
            project = self.query_api('createProject', **args)

            poll_async = self.module.params.get('poll_async')
            if project and poll_async:
                project = self.poll_job(project, 'project')
        return project

    def state_project(self, state='active'):
        # Drive the project into the 'active' or 'suspended' state,
        # creating/updating it first via present_project().
        project = self.present_project()

        if project['state'].lower() != state:
            self.result['changed'] = True

            args = {
                'id': project['id']
            }
            if not self.module.check_mode:
                if state == 'suspended':
                    project = self.query_api('suspendProject', **args)
                else:
                    project = self.query_api('activateProject', **args)

                poll_async = self.module.params.get('poll_async')
                if project and poll_async:
                    project = self.poll_job(project, 'project')
        return project

    def absent_project(self):
        # Delete the project if it exists; returns the pre-deletion
        # project dict so the result can still be reported.
        project = self.get_project()
        if project:
            self.result['changed'] = True

            args = {
                'id': project['id']
            }
            if not self.module.check_mode:
                res = self.query_api('deleteProject', **args)

                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    res = self.poll_job(res, 'project')

        return project
def main():
    """Module entry point: build the argument spec, then dispatch on the
    requested state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        state=dict(choices=['present', 'absent', 'active', 'suspended'], default='present'),
        domain=dict(),
        account=dict(),
        poll_async=dict(type='bool', default=True),
        tags=dict(type='list', aliases=['tag']),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_project = AnsibleCloudStackProject(module)

    state = module.params.get('state')
    if state == 'absent':
        project = acs_project.absent_project()
    elif state in ('active', 'suspended'):
        project = acs_project.state_project(state=state)
    else:
        project = acs_project.present_project()

    module.exit_json(**acs_project.get_result(project))


if __name__ == '__main__':
    main()
|
fedspendingtransparency/data-act-broker-backend | refs/heads/development | tests/unit/dataactvalidator/test_fabs32_detached_award_financial_assistance_1.py | 1 | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs32_detached_award_financial_assistance_1'
def test_column_headers(database):
    """The rule's output must expose exactly these columns."""
    expected = {'row_number', 'period_of_performance_star', 'uniqueid_AssistanceTransactionUniqueKey'}
    assert expected == set(query_columns(_FILE, database))
def test_success(database):
    """ PeriodOfPerformanceStartDate is an optional field, but when provided, must follow YYYYMMDD format """
    # (period_of_performance_star, correction_delete_indicatr) pairs that
    # must not trigger the rule.
    ok_cases = [
        ('19990131', ''),   # valid YYYYMMDD
        (None, 'c'),        # missing is allowed
        ('', None),         # empty is allowed
        ('1234', 'd'),      # malformed, but correction delete indicator D is ignored
    ]
    models = [
        DetachedAwardFinancialAssistanceFactory(period_of_performance_star=date_value,
                                                correction_delete_indicatr=cdi)
        for date_value, cdi in ok_cases
    ]
    assert number_of_errors(_FILE, database, models=models) == 0
def test_failure(database):
    """ PeriodOfPerformanceStartDate is an optional field, but when provided, must follow YYYYMMDD format """
    # Each of these malformed dates must raise exactly one error.
    bad_cases = [
        ('19990132', ''),    # impossible day
        ('19991331', None),  # impossible month
        ('1234', 'c'),       # far too short
        ('200912', 'C'),     # missing day component
    ]
    models = [
        DetachedAwardFinancialAssistanceFactory(period_of_performance_star=date_value,
                                                correction_delete_indicatr=cdi)
        for date_value, cdi in bad_cases
    ]
    assert number_of_errors(_FILE, database, models=models) == 4
|
fsb4000/bitcoin | refs/heads/master | share/rpcuser/rpcuser.py | 9 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
|
alexrao/YouCompleteMe | refs/heads/master | third_party/ycmd/third_party/requests/requests/packages/urllib3/contrib/ntlmpool.py | 1009 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
    """
    Implements an NTLM authentication version of an urllib3 connection pool.

    NTLM authenticates the *connection*, so the handshake is performed once
    in :meth:`_new_conn` and the socket is then kept alive for reuse.
    """

    scheme = 'https'

    def __init__(self, user, pw, authurl, *args, **kwargs):
        """
        authurl is a random URL on the server that is protected by NTLM.
        user is the Windows user, probably in the DOMAIN\\username format.
        pw is the password for the user.
        """
        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
        self.authurl = authurl
        self.rawuser = user
        # Split 'DOMAIN\\user'; the domain is normalized to upper case.
        user_parts = user.split('\\', 1)
        self.domain = user_parts[0].upper()
        self.user = user_parts[1]
        self.pw = pw

    def _new_conn(self):
        # Performs the NTLM handshake that secures the connection. The socket
        # must be kept open while requests are performed.
        # All log.debug calls pass format arguments lazily (comma form)
        # instead of eager '%' formatting, so the formatting cost is only
        # paid when debug logging is enabled.
        self.num_connections += 1
        log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
                  self.num_connections, self.host, self.authurl)

        headers = {}
        headers['Connection'] = 'Keep-Alive'
        req_header = 'Authorization'
        resp_header = 'www-authenticate'

        conn = HTTPSConnection(host=self.host, port=self.port)

        # Send negotiation message
        headers[req_header] = (
            'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        reshdr = dict(res.getheaders())
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', reshdr)
        log.debug('Response data: %s [...]', res.read(100))

        # Remove the reference to the socket, so that it can not be closed by
        # the response object (we want to keep the socket open)
        res.fp = None

        # Server should respond with a challenge message
        auth_header_values = reshdr[resp_header].split(', ')
        auth_header_value = None
        for s in auth_header_values:
            if s[:5] == 'NTLM ':
                auth_header_value = s[5:]
        if auth_header_value is None:
            raise Exception('Unexpected %s response header: %s' %
                            (resp_header, reshdr[resp_header]))

        # Send authentication message
        ServerChallenge, NegotiateFlags = \
            ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
                                                         self.user,
                                                         self.domain,
                                                         self.pw,
                                                         NegotiateFlags)
        headers[req_header] = 'NTLM %s' % auth_msg
        log.debug('Request headers: %s', headers)
        conn.request('GET', self.authurl, None, headers)
        res = conn.getresponse()
        log.debug('Response status: %s %s', res.status, res.reason)
        log.debug('Response headers: %s', dict(res.getheaders()))
        log.debug('Response data: %s [...]', res.read()[:100])
        if res.status != 200:
            if res.status == 401:
                raise Exception('Server rejected request: wrong '
                                'username or password')
            raise Exception('Wrong server response: %s %s' %
                            (res.status, res.reason))

        res.fp = None
        log.debug('Connection established')
        return conn

    def urlopen(self, method, url, body=None, headers=None, retries=3,
                redirect=True, assert_same_host=True):
        # Force keep-alive: the NTLM handshake authenticated this socket,
        # so it must be reused rather than reopened per request.
        if headers is None:
            headers = {}
        headers['Connection'] = 'Keep-Alive'
        return super(NTLMConnectionPool, self).urlopen(method, url, body,
                                                       headers, retries,
                                                       redirect,
                                                       assert_same_host)
|
JCROM-Android/jcrom_external_chromium_org | refs/heads/kitkat | third_party/jinja2/loaders.py | 108 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import LRUCache, open_if_exists, internalcode
def split_template_path(template):
    """Split a '/'-separated template name into its path segments.

    Raises `TemplateNotFound` if any segment could escape the template
    root (contains an OS path separator or equals ``'..'``).  Empty
    segments and ``'.'`` are silently dropped.
    """
    pieces = []
    for piece in template.split('/'):
        unsafe = (
            path.sep in piece
            or (path.altsep and path.altsep in piece)
            or piece == path.pardir
        )
        if unsafe:
            raise TemplateNotFound(template)
        if piece and piece != '.':
            pieces.append(piece)
    return pieces
class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source`
    to implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic file-system loader could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Return ``(source, filename, uptodate)`` for a template, or raise
        `TemplateNotFound` if it cannot be located.

        ``source`` is the template source as unicode string or ASCII
        bytestring.  ``filename`` is the file-system path the template was
        loaded from, or `None`; python uses it for tracebacks when no
        loader extension is in play.  ``uptodate`` is a callable used by
        auto reloading: it is invoked with no arguments (so it must capture
        any state it needs, e.g. in a closure) and returning `False`
        triggers a reload.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterate over all templates.  Loaders that cannot enumerate their
        templates raise a :exc:`TypeError`, which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    @internalcode
    def load(self, environment, name, globals=None):
        """Load a template, consulting the environment's bytecode cache
        when one is configured.  Subclasses should not override this
        method; loaders that wrap other loaders (such as
        :class:`PrefixLoader` or :class:`ChoiceLoader`) bypass it and call
        `get_source` directly.
        """
        if globals is None:
            globals = {}

        # Ask the concrete loader for source, filename and reload helper.
        source, filename, uptodate = self.get_source(environment, name)

        # Prefer cached bytecode when a bytecode cache is configured.
        code = None
        bucket = None
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # Nothing cached (or cache stale): compile, and write the fresh
        # code object back into the bytecode cache when possible.
        if code is None:
            code = environment.compile(source, name, filename)
            if bucket is not None and bucket.code is None:
                bucket.code = code
                bcc.set_bucket(bucket)

        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
class FileSystemLoader(BaseLoader):
    """Loads templates from directories on the file system.  This is the
    preferred loader for file-based templates.

    Accepts a single search path or a list of paths which are consulted in
    the given order::

        >>> loader = FileSystemLoader('/path/to/templates')
        >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Template files are decoded with `encoding` (``'utf-8'`` per default).
    """

    def __init__(self, searchpath, encoding='utf-8'):
        # Normalize a single path into a list; always copy the list so a
        # caller-side mutation cannot affect the loader.
        if isinstance(searchpath, basestring):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for root in self.searchpath:
            filename = path.join(root, *pieces)
            handle = open_if_exists(filename)
            if handle is None:
                # Not under this root -- try the next search path.
                continue
            try:
                contents = handle.read().decode(self.encoding)
            finally:
                handle.close()

            mtime = path.getmtime(filename)

            def uptodate():
                # Current while the file's mtime is unchanged; a vanished
                # file counts as stale.
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False
            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for root in self.searchpath:
            for dirpath, dirnames, filenames in os.walk(root):
                for fname in filenames:
                    # Strip the search root and normalize to '/' separators.
                    template = os.path.join(dirpath, fname) \
                        [len(root):].strip(os.path.sep) \
                        .replace(os.path.sep, '/')
                    if template[:2] == './':
                        template = template[2:]
                    found.add(template)
        return sorted(found)
class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.  Constructed from the
    name of a python package and the template directory inside it::

        loader = PackageLoader('mypackage', 'views')

    `package_path` defaults to ``'templates'``; `encoding` defaults to
    ``'utf-8'``.  Due to the nature of eggs, auto reloading only works for
    packages loaded from the file system (not from a zip file).
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        # Only file-system providers can report filenames/mtimes.
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        resource_path = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(resource_path):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(
                self.manager, resource_path)
            mtime = path.getmtime(filename)

            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, resource_path)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        base = self.package_path
        if base[:2] == './':
            base = base[2:]
        elif base == '.':
            base = ''
        offset = len(base)
        results = []
        # Iterative depth-first walk over the provider's resource tree.
        pending = [base]
        while pending:
            current = pending.pop()
            for filename in self.provider.resource_listdir(current):
                fullname = current + '/' + filename
                if self.provider.resource_isdir(fullname):
                    pending.append(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))
        results.sort()
        return results
class DictLoader(BaseLoader):
    """Loads templates from a python dict mapping template names to unicode
    source strings.  Useful for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Because auto reloading is rarely useful this is disabled per default.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        """Return ``(source, None, uptodate)`` for *template* or raise
        `TemplateNotFound`.  The uptodate callable reports `True` while the
        mapping still holds the same source for the name."""
        if template in self.mapping:
            source = self.mapping[template]
            # BUG FIX: the comparison was inverted (`!=`), so unchanged
            # templates were reported stale (forcing recompiles) while
            # actual changes were reported up-to-date.  uptodate must be
            # True exactly when the stored source is unchanged.
            return source, None, lambda: source == self.mapping.get(template)
        raise TemplateNotFound(template)

    def list_templates(self):
        return sorted(self.mapping)
class FunctionLoader(BaseLoader):
    """A loader backed by a callable.  The function receives the template
    name and returns either the source as a unicode string, a ``(source,
    filename, uptodatefunc)`` tuple, or `None` when the template does not
    exist.

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    `uptodatefunc` is called when auto reloading is enabled and must return
    `True` while the template is still up to date; see
    :meth:`BaseLoader.get_source`, which has the same return value.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        rv = self.load_func(template)
        if rv is None:
            raise TemplateNotFound(template)
        if isinstance(rv, basestring):
            # Bare source string: no filename, no uptodate check.
            return rv, None, None
        return rv
class PrefixLoader(BaseLoader):
    """Delegates to one of several loaders selected by a name prefix.  The
    prefix is separated from the template name by `delimiter` (``'/'`` per
    default)::

        loader = PrefixLoader({
            'app1':     PackageLoader('mypackage.app1'),
            'app2':     PackageLoader('mypackage.app2')
        })

    ``'app1/index.html'`` then resolves through the app1 package and
    ``'app2/index.html'`` through the second one.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_source(self, environment, template):
        try:
            prefix, name = template.split(self.delimiter, 1)
            loader = self.mapping[prefix]
        except (ValueError, KeyError):
            # No delimiter present, or unknown prefix.
            raise TemplateNotFound(template)
        try:
            return loader.get_source(environment, name)
        except TemplateNotFound:
            # Re-raise with the full (prefixed) name so the error matches
            # what the caller actually asked for.
            raise TemplateNotFound(template)

    def list_templates(self):
        return [prefix + self.delimiter + name
                for prefix, loader in self.mapping.iteritems()
                for name in loader.list_templates()]
class ChoiceLoader(BaseLoader):
    """Tries a sequence of loaders in order and serves the template from
    the first one that can find it.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    Useful to let user-provided templates override builtin ones from a
    different location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        for loader in self.loaders:
            try:
                return loader.get_source(environment, template)
            except TemplateNotFound:
                # Fall through to the next loader in line.
                continue
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for loader in self.loaders:
            found.update(loader.list_templates())
        return sorted(found)
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references.

    Plain ``ModuleType`` instances cannot be weakly referenced; this
    subclass enables that, which :class:`ModuleLoader` relies on to drop
    its ``sys.modules`` entry when the loader is garbage collected.
    """
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    # Precompiled modules expose no template source.
    has_source_access = False

    def __init__(self, path):
        # Unique per-loader package name so multiple ModuleLoaders can
        # coexist without colliding in sys.modules.
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, basestring):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        # Register a weak proxy in sys.modules so __import__ can find the
        # compiled template modules; the callback removes the entry again
        # when the real module object is collected.
        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        # Hash the template name so arbitrary names become valid module
        # identifiers.
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        # Filename of the compiled module for the given template name.
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        """Import the precompiled module for *name* and build a template
        from its namespace; raises `TemplateNotFound` when no compiled
        module exists."""
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals)
|
youngking/protobuf | refs/heads/master | python/google/protobuf/message_factory.py | 228 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import reflection
class MessageFactory(object):
  """Factory for creating Proto2 messages from descriptors in a pool."""

  def __init__(self):
    """Initializes a new factory."""
    # Maps fully qualified message name -> generated class.
    self._classes = {}

  def GetPrototype(self, descriptor):
    """Builds a proto2 message class based on the passed in descriptor.

    Passing a descriptor with a fully qualified name matching a previous
    invocation will cause the same class to be returned.

    Args:
      descriptor: The descriptor to build from.

    Returns:
      A class describing the passed in descriptor.
    """
    full_name = descriptor.full_name
    if full_name not in self._classes:
      self._classes[full_name] = reflection.GeneratedProtocolMessageType(
          descriptor.name.encode('ascii', 'ignore'),
          (message.Message,),
          {'DESCRIPTOR': descriptor})
      # Recursively build classes for any message types referenced by
      # this one, so composite fields can be instantiated too.
      for field in descriptor.fields:
        if field.message_type:
          self.GetPrototype(field.message_type)
    return self._classes[full_name]
# Module-level singletons shared by GetMessages(): a database of file
# descriptor protos, a pool that resolves descriptors from it, and the
# factory that turns descriptors into generated message classes.
_DB = descriptor_database.DescriptorDatabase()
_POOL = descriptor_pool.DescriptorPool(_DB)
_FACTORY = MessageFactory()
def GetMessages(file_protos):
  """Builds a dictionary of all the messages available in a set of files.

  Args:
    file_protos: A sequence of file protos to build messages out of.

  Returns:
    A dictionary containing all the message types in the files mapping the
    fully qualified name to a Message subclass for the descriptor.
  """
  # Register every file first so cross-file references resolve.
  for file_proto in file_protos:
    _DB.Add(file_proto)
  result = {}
  for file_proto in file_protos:
    for desc in _GetAllDescriptors(file_proto.message_type,
                                   file_proto.package):
      result[desc.full_name] = _FACTORY.GetPrototype(desc)
  return result
def _GetAllDescriptors(desc_protos, package):
  """Gets all levels of nested message types as a flattened list of descriptors.

  Args:
    desc_protos: The descriptor protos to process.
    package: The package where the protos are defined.

  Yields:
    Each message descriptor for each nested type.
  """
  for proto in desc_protos:
    full_name = '.'.join((package, proto.name))
    yield _POOL.FindMessageTypeByName(full_name)
    # Recurse into nested declarations, using the current message's full
    # name as the enclosing "package".
    for nested in _GetAllDescriptors(proto.nested_type, full_name):
      yield nested
|
tsufiev/horizon | refs/heads/master | openstack_dashboard/dashboards/project/access_and_security/floating_ips/tables.py | 2 | # Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)
# Optional deployment-configured policy hook; when POLICY_CHECK_FUNCTION
# is not set, every policy check is permitted.
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class AllocateIP(tables.LinkAction):
    """Table action opening the "allocate floating IP to project" modal."""
    name = "allocate"
    verbose_name = _("Allocate IP To Project")
    classes = ("ajax-modal",)
    icon = "link"
    url = "horizon:project:access_and_security:floating_ips:allocate"

    def single(self, data_table, request, *args):
        return shortcuts.redirect('horizon:project:access_and_security:index')

    def allowed(self, request, volume=None):
        # Disable (rather than hide) the button when the floating IP quota
        # is exhausted, and annotate its label accordingly.
        usages = quotas.tenant_quota_usages(request)
        if usages['floating_ips']['available'] <= 0:
            if "disabled" not in self.classes:
                self.classes = [c for c in self.classes] + ['disabled']
                self.verbose_name = string_concat(self.verbose_name, ' ',
                                                  _("(Quota exceeded)"))
        else:
            # Quota available again: restore the label and re-enable.
            self.verbose_name = _("Allocate IP To Project")
            classes = [c for c in self.classes if c != "disabled"]
            self.classes = classes
        # The policy target differs between neutron and nova-network.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "create_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:allocate_floating_ip"),)
        return POLICY_CHECK(policy, request)
class ReleaseIPs(tables.BatchAction):
    """Batch action releasing floating IPs back to the pool."""
    name = "release"
    classes = ('btn-danger',)
    icon = "unlink"

    @staticmethod
    def action_present(count):
        # Button label, pluralized by selection count.
        return ungettext_lazy(
            u"Release Floating IP",
            u"Release Floating IPs",
            count
        )

    @staticmethod
    def action_past(count):
        # Past-tense message, pluralized by selection count.
        return ungettext_lazy(
            u"Released Floating IP",
            u"Released Floating IPs",
            count
        )

    def allowed(self, request, fip=None):
        # The policy target differs between neutron and nova-network.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "delete_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:release_floating_ip"),)
        return POLICY_CHECK(policy, request)

    def action(self, request, obj_id):
        api.network.tenant_floating_ip_release(request, obj_id)
class AssociateIP(tables.LinkAction):
    """Row action linking to the "associate floating IP" modal."""
    name = "associate"
    verbose_name = _("Associate")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"

    def allowed(self, request, fip):
        # The policy target differs between neutron and nova-network.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:associate_floating_ip"),)
        # Only IPs not already mapped to a port may be associated.
        return not fip.port_id and POLICY_CHECK(policy, request)

    def get_link_url(self, datum):
        # Pre-select this IP in the associate form via a query parameter.
        base_url = reverse(self.url)
        params = urlencode({"ip_id": self.table.get_object_id(datum)})
        return "?".join([base_url, params])
class DisassociateIP(tables.Action):
    """Row action that disassociates a floating IP from its port."""
    name = "disassociate"
    verbose_name = _("Disassociate")
    classes = ("btn-disassociate", "btn-danger")
    icon = "unlink"

    def allowed(self, request, fip):
        # The policy target differs between neutron and nova-network.
        if api.base.is_service_enabled(request, "network"):
            policy = (("network", "update_floatingip"),)
        else:
            policy = (("compute", "compute_extension:floating_ips"),
                      ("compute", "network:disassociate_floating_ip"),)
        # Only IPs currently mapped to a port may be disassociated.
        return fip.port_id and POLICY_CHECK(policy, request)

    def single(self, table, request, obj_id):
        try:
            fip = table.get_object_by_id(filters.get_int_or_uuid(obj_id))
            api.network.floating_ip_disassociate(request, fip.id)
            LOG.info('Disassociating Floating IP "%s".' % obj_id)
            messages.success(request,
                             _('Successfully disassociated Floating IP: %s')
                             % fip.ip)
        except Exception:
            exceptions.handle(request,
                              _('Unable to disassociate floating IP.'))
        return shortcuts.redirect('horizon:project:access_and_security:index')
def get_instance_info(fip):
    """Return a human-readable description of what *fip* is mapped to.

    Returns None when the floating IP is not associated with anything.
    """
    instance_type = fip.instance_type
    if instance_type == 'compute':
        return (_("%(instance_name)s %(fixed_ip)s")
                % {'instance_name': getattr(fip, "instance_name", ''),
                   'fixed_ip': fip.fixed_ip})
    if instance_type == 'loadbalancer':
        return _("Load Balancer VIP %s") % fip.fixed_ip
    if instance_type:
        # Some other association type: just show the fixed address.
        return fip.fixed_ip
    return None
def get_instance_link(datum):
    """Return the instance detail-page URL for compute mappings, else None."""
    if datum.instance_type != 'compute':
        return None
    return reverse("horizon:project:instances:detail",
                   args=(datum.instance_id,))
# Maps the raw floating-IP status value to its translated display label.
STATUS_DISPLAY_CHOICES = (
    ("active", pgettext_lazy("Current status of a Floating IP", u"Active")),
    ("down", pgettext_lazy("Current status of a Floating IP", u"Down")),
    ("error", pgettext_lazy("Current status of a Floating IP", u"Error")),
)
class FloatingIPsTable(tables.DataTable):
    """Table listing the project's floating IPs and their mappings."""
    # (status value, is_healthy) pairs used for row status polling.
    STATUS_CHOICES = (
        ("active", True),
        ("down", True),
        ("error", False)
    )
    ip = tables.Column("ip",
                       verbose_name=_("IP Address"),
                       attrs={'data-type': "ip"})
    fixed_ip = tables.Column(get_instance_info,
                             link=get_instance_link,
                             verbose_name=_("Mapped Fixed IP Address"))
    pool = tables.Column("pool_name",
                         verbose_name=_("Pool"))
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)

    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(FloatingIPsTable, self).__init__(
            request, data=data, needs_form_wrapper=needs_form_wrapper,
            **kwargs)
        # nova-network does not report a floating IP status, so hide
        # the column when neutron is not available.
        if not api.base.is_service_enabled(request, 'network'):
            del self.columns['status']

    def sanitize_id(self, obj_id):
        return filters.get_int_or_uuid(obj_id)

    def get_object_display(self, datum):
        return datum.ip

    class Meta(object):
        name = "floating_ips"
        verbose_name = _("Floating IPs")
        table_actions = (AllocateIP, ReleaseIPs)
        row_actions = (AssociateIP, DisassociateIP, ReleaseIPs)
|
charukiewicz/beer-manager | refs/heads/master | venv/lib/python3.4/site-packages/pip/_vendor/distlib/resources.py | 191 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import bisect
import io
import logging
import os
import pkgutil
import shutil
import sys
import types
import zipimport
from . import DistlibException
from .util import cached_property, get_cache_base, path_to_cache_dir, Cache
logger = logging.getLogger(__name__)

# Shared ResourceCache instance; created lazily on first use
# (see Resource.file_path).
cache = None    # created when needed
class ResourceCache(Cache):
    """File-system cache for resources that live inside containers
    (e.g. zip archives), so callers can be handed a real pathname."""
    def __init__(self, base=None):
        if base is None:
            # Use native string to avoid issues on 2.x: see Python #20140.
            base = os.path.join(get_cache_base(), str('resource-cache'))
        super(ResourceCache, self).__init__(base)

    def is_stale(self, resource, path):
        """
        Is the cache stale for the given resource?

        :param resource: The :class:`Resource` being cached.
        :param path: The path of the resource in the cache.
        :return: True if the cache is stale.
        """
        # Cache invalidation is a hard problem :-)
        return True

    def get(self, resource):
        """
        Get a resource into the cache.

        :param resource: A :class:`Resource` instance.
        :return: The pathname of the resource in the cache.
        """
        prefix, path = resource.finder.get_cache_info(resource)
        if prefix is None:
            # The resource is already a plain file; no copy needed.
            result = path
        else:
            result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
            dirname = os.path.dirname(result)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            if not os.path.exists(result):
                stale = True
            else:
                stale = self.is_stale(resource, path)
            if stale:
                # write the bytes of the resource to the cache location
                with open(result, 'wb') as f:
                    f.write(resource.bytes)
        return result
class ResourceBase(object):
    """Common state for :class:`Resource` and :class:`ResourceContainer`:
    the finder that owns the resource and its name."""
    def __init__(self, finder, name):
        self.finder = finder
        self.name = name
class Resource(ResourceBase):
    """
    A class representing an in-package resource, such as a data file. This is
    not normally instantiated by user code, but rather by a
    :class:`ResourceFinder` which manages the resource.
    """
    is_container = False        # Backwards compatibility

    def as_stream(self):
        """
        Get the resource as a stream.

        This is not a property to make it obvious that it returns a new stream
        each time.
        """
        return self.finder.get_stream(self)

    @cached_property
    def file_path(self):
        # Materialise the resource on the file system via the shared
        # module-level cache and return the resulting pathname.
        global cache
        if cache is None:
            cache = ResourceCache()
        return cache.get(self)

    @cached_property
    def bytes(self):
        # Raw contents of the resource as a bytes object.
        return self.finder.get_bytes(self)

    @cached_property
    def size(self):
        # Size of the resource in bytes.
        return self.finder.get_size(self)
class ResourceContainer(ResourceBase):
    """A resource which contains other resources (e.g. a directory)."""
    is_container = True     # Backwards compatibility

    @cached_property
    def resources(self):
        # Names of the immediate children of this container.
        return self.finder.get_resources(self)
class ResourceFinder(object):
    """
    Resource finder for file system resources.
    """
    def __init__(self, module):
        self.module = module
        self.loader = getattr(module, '__loader__', None)
        # Resources are resolved relative to the package's directory.
        self.base = os.path.dirname(getattr(module, '__file__', ''))

    def _adjust_path(self, path):
        return os.path.realpath(path)

    def _make_path(self, resource_name):
        # Resource names always use '/' separators; join the parts onto
        # the package base using OS path conventions.
        parts = resource_name.split('/')
        parts.insert(0, self.base)
        result = os.path.join(*parts)
        return self._adjust_path(result)

    def _find(self, path):
        return os.path.exists(path)

    def get_cache_info(self, resource):
        # A None prefix signals that the resource is already a plain file
        # and need not be copied into the cache (see ResourceCache.get).
        return None, resource.path

    def find(self, resource_name):
        # Return a Resource or ResourceContainer for resource_name,
        # or None if it does not exist.
        path = self._make_path(resource_name)
        if not self._find(path):
            result = None
        else:
            if self._is_directory(path):
                result = ResourceContainer(self, resource_name)
            else:
                result = Resource(self, resource_name)
            result.path = path
        return result

    def get_stream(self, resource):
        return open(resource.path, 'rb')

    def get_bytes(self, resource):
        with open(resource.path, 'rb') as f:
            return f.read()

    def get_size(self, resource):
        return os.path.getsize(resource.path)

    def get_resources(self, resource):
        def allowed(f):
            # Skip bytecode artefacts.
            return f != '__pycache__' and not f.endswith(('.pyc', '.pyo'))
        return set([f for f in os.listdir(resource.path) if allowed(f)])

    def is_container(self, resource):
        return self._is_directory(resource.path)

    _is_directory = staticmethod(os.path.isdir)
class ZipResourceFinder(ResourceFinder):
    """
    Resource finder for resources in .zip files.
    """
    def __init__(self, module):
        super(ZipResourceFinder, self).__init__(module)
        archive = self.loader.archive
        # Length of the archive path plus the path separator; used to
        # strip the archive prefix from absolute resource paths.
        self.prefix_len = 1 + len(archive)
        # PyPy doesn't have a _files attr on zipimporter, and you can't set one
        if hasattr(self.loader, '_files'):
            self._files = self.loader._files
        else:
            self._files = zipimport._zip_directory_cache[archive]
        # Sorted list of archive entry names, searched with bisect below.
        self.index = sorted(self._files)

    def _adjust_path(self, path):
        # Paths inside an archive must not be realpath'd.
        return path

    def _find(self, path):
        path = path[self.prefix_len:]
        if path in self._files:
            result = True
        else:
            # Not a file entry: treat the path as a directory prefix and
            # check whether any archive entry lives below it.
            if path and path[-1] != os.sep:
                path = path + os.sep
            i = bisect.bisect(self.index, path)
            try:
                result = self.index[i].startswith(path)
            except IndexError:
                result = False
        if not result:
            logger.debug('_find failed: %r %r', path, self.loader.prefix)
        else:
            logger.debug('_find worked: %r %r', path, self.loader.prefix)
        return result

    def get_cache_info(self, resource):
        # The archive path is the cache prefix; the in-archive path is
        # relative to it.
        prefix = self.loader.archive
        path = resource.path[1 + len(prefix):]
        return prefix, path

    def get_bytes(self, resource):
        return self.loader.get_data(resource.path)

    def get_stream(self, resource):
        return io.BytesIO(self.get_bytes(resource))

    def get_size(self, resource):
        path = resource.path[self.prefix_len:]
        # NOTE(review): index 3 of the zipimport directory entry appears
        # to be the uncompressed size — confirm against zipimport internals.
        return self._files[path][3]

    def get_resources(self, resource):
        path = resource.path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        plen = len(path)
        result = set()
        # Scan the sorted index from the first entry under this prefix.
        i = bisect.bisect(self.index, path)
        while i < len(self.index):
            if not self.index[i].startswith(path):
                break
            s = self.index[i][plen:]
            result.add(s.split(os.sep, 1)[0])   # only immediate children
            i += 1
        return result

    def _is_directory(self, path):
        path = path[self.prefix_len:]
        if path and path[-1] != os.sep:
            path += os.sep
        i = bisect.bisect(self.index, path)
        try:
            result = self.index[i].startswith(path)
        except IndexError:
            result = False
        return result
# Maps loader types to finder factories.  A loader of None (regular
# file-system imports) uses the plain ResourceFinder.
_finder_registry = {
    type(None): ResourceFinder,
    zipimport.zipimporter: ZipResourceFinder
}

try:
    # On Python 3.3+, importlib-based loaders behave like file-system ones.
    import _frozen_importlib
    _finder_registry[_frozen_importlib.SourceFileLoader] = ResourceFinder
    _finder_registry[_frozen_importlib.FileFinder] = ResourceFinder
except (ImportError, AttributeError):
    pass
def register_finder(loader, finder_maker):
    """Register *finder_maker* as the finder factory for *loader*'s type."""
    _finder_registry[type(loader)] = finder_maker

# Cache of package name -> finder instance (see finder()).
_finder_cache = {}
def finder(package):
    """
    Return a resource finder for a package.

    :param package: The name of the package.
    :return: A :class:`ResourceFinder` instance for the package.
    :raises DistlibException: if *package* is a plain module, or no finder
        is registered for its loader type.
    """
    if package in _finder_cache:
        result = _finder_cache[package]
    else:
        if package not in sys.modules:
            __import__(package)
        module = sys.modules[package]
        # Only packages have __path__; plain modules are rejected.
        path = getattr(module, '__path__', None)
        if path is None:
            raise DistlibException('You cannot get a finder for a module, '
                                   'only for a package')
        loader = getattr(module, '__loader__', None)
        finder_maker = _finder_registry.get(type(loader))
        if finder_maker is None:
            raise DistlibException('Unable to locate finder for %r' % package)
        result = finder_maker(module)
        _finder_cache[package] = result
    return result
# Placeholder module object used by finder_for_path() to satisfy the
# finders' expectation of a module argument.
_dummy_module = types.ModuleType(str('__dummy__'))

def finder_for_path(path):
    """
    Return a resource finder for a path, which should represent a container.

    :param path: The path.
    :return: A :class:`ResourceFinder` instance for the path, or None if
        no finder is registered for the path's importer type.
    """
    result = None
    # calls any path hooks, gets importer into cache
    pkgutil.get_importer(path)
    loader = sys.path_importer_cache.get(path)
    finder = _finder_registry.get(type(loader))
    if finder:
        # Dress the path up as a module so the finder can consume it.
        module = _dummy_module
        module.__file__ = os.path.join(path, '')
        module.__loader__ = loader
        result = finder(module)
    return result
|
ProfessionalIT/maxigenios-website | refs/heads/master | sdk/google_appengine/lib/cherrypy/cherrypy/test/test_conn.py | 53 | """Tests for TCP connection handling, including proper and timely close."""
import socket
import sys
import time
# Server socket timeout (seconds) applied in setup_server() and waited
# on (doubled) by the timeout tests below.
timeout = 1
import cherrypy
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, NotConnected, BadStatusLine
from cherrypy._cpcompat import ntob, urlopen, unicodestr
from cherrypy.test import webtest
from cherrypy import _cperror
# Distinctive marker body returned by most handlers, so body assertions
# cannot accidentally match unrelated output.
pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN'
def setup_server():
    """Mount the test application and configure the CherryPy server."""
    def raise500():
        raise cherrypy.HTTPError(500)

    class Root:
        def index(self):
            return pov
        index.exposed = True
        # Aliases so multiple URLs serve the same marker body.
        page1 = index
        page2 = index
        page3 = index

        def hello(self):
            return "Hello, world!"
        hello.exposed = True

        def timeout(self, t):
            return str(cherrypy.server.httpserver.timeout)
        timeout.exposed = True

        def stream(self, set_cl=False):
            # Optionally set an explicit Content-Length for the streamed body.
            if set_cl:
                cherrypy.response.headers['Content-Length'] = 10

            def content():
                for x in range(10):
                    yield str(x)

            return content()
        stream.exposed = True
        stream._cp_config = {'response.stream': True}

        def error(self, code=500):
            raise cherrypy.HTTPError(code)
        error.exposed = True

        def upload(self):
            if not cherrypy.request.method == 'POST':
                raise AssertionError("'POST' != request.method %r" %
                                     cherrypy.request.method)
            return "thanks for '%s'" % cherrypy.request.body.read()
        upload.exposed = True

        def custom(self, response_code):
            cherrypy.response.status = response_code
            return "Code = %s" % response_code
        custom.exposed = True

        def err_before_read(self):
            return "ok"
        err_before_read.exposed = True
        # Force a 500 before the request body is read.
        err_before_read._cp_config = {'hooks.on_start_resource': raise500}

        def one_megabyte_of_a(self):
            return ["a" * 1024] * 1024
        one_megabyte_of_a.exposed = True

        def custom_cl(self, body, cl):
            # Respond with a caller-chosen (possibly wrong) Content-Length.
            cherrypy.response.headers['Content-Length'] = cl
            if not isinstance(body, list):
                body = [body]
            newbody = []
            for chunk in body:
                if isinstance(chunk, unicodestr):
                    chunk = chunk.encode('ISO-8859-1')
                newbody.append(chunk)
            return newbody
        custom_cl.exposed = True
        # Turn off the encoding tool so it doesn't collapse
        # our response body and recalculate the Content-Length.
        custom_cl._cp_config = {'tools.encode.on': False}

    cherrypy.tree.mount(Root())
    cherrypy.config.update({
        'server.max_request_body_size': 1001,
        'server.socket_timeout': timeout,
    })
from cherrypy.test import helper
class ConnectionCloseTests(helper.CPWebCase):
    """Tests for connection close/keep-alive behaviour over HTTP/1.0 and 1.1."""
    setup_server = staticmethod(setup_server)

    def test_HTTP11(self):
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()

        self.PROTOCOL = "HTTP/1.1"
        self.persistent = True

        # Make the first request and assert there's no "Connection: close".
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader("Connection")

        # Make another request on the same connection.
        self.getPage("/page1")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader("Connection")

        # Test client-side close.
        self.getPage("/page2", headers=[("Connection", "close")])
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertHeader("Connection", "close")

        # Make another request on the same connection, which should error.
        self.assertRaises(NotConnected, self.getPage, "/")

    def test_Streaming_no_len(self):
        self._streaming(set_cl=False)

    def test_Streaming_with_len(self):
        self._streaming(set_cl=True)

    def _streaming(self, set_cl):
        # Shared driver for the two streaming tests above; behaviour
        # depends on protocol version and whether a Content-Length is set.
        if cherrypy.server.protocol_version == "HTTP/1.1":
            self.PROTOCOL = "HTTP/1.1"
            self.persistent = True

            # Make the first request and assert there's no "Connection: close".
            self.getPage("/")
            self.assertStatus('200 OK')
            self.assertBody(pov)
            self.assertNoHeader("Connection")

            # Make another, streamed request on the same connection.
            if set_cl:
                # When a Content-Length is provided, the content should stream
                # without closing the connection.
                self.getPage("/stream?set_cl=Yes")
                self.assertHeader("Content-Length")
                self.assertNoHeader("Connection", "close")
                self.assertNoHeader("Transfer-Encoding")
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
            else:
                # When no Content-Length response header is provided,
                # streamed output will either close the connection, or use
                # chunked encoding, to determine transfer-length.
                self.getPage("/stream")
                self.assertNoHeader("Content-Length")
                self.assertStatus('200 OK')
                self.assertBody('0123456789')

                chunked_response = False
                for k, v in self.headers:
                    if k.lower() == "transfer-encoding":
                        if str(v) == "chunked":
                            chunked_response = True

                if chunked_response:
                    self.assertNoHeader("Connection", "close")
                else:
                    self.assertHeader("Connection", "close")

                    # Make another request on the same connection, which
                    # should error.
                    self.assertRaises(NotConnected, self.getPage, "/")

                # Try HEAD. See http://www.cherrypy.org/ticket/864.
                self.getPage("/stream", method='HEAD')
                self.assertStatus('200 OK')
                self.assertBody('')
                self.assertNoHeader("Transfer-Encoding")
        else:
            self.PROTOCOL = "HTTP/1.0"
            self.persistent = True

            # Make the first request and assert Keep-Alive.
            self.getPage("/", headers=[("Connection", "Keep-Alive")])
            self.assertStatus('200 OK')
            self.assertBody(pov)
            self.assertHeader("Connection", "Keep-Alive")

            # Make another, streamed request on the same connection.
            if set_cl:
                # When a Content-Length is provided, the content should
                # stream without closing the connection.
                self.getPage("/stream?set_cl=Yes",
                             headers=[("Connection", "Keep-Alive")])
                self.assertHeader("Content-Length")
                self.assertHeader("Connection", "Keep-Alive")
                self.assertNoHeader("Transfer-Encoding")
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
            else:
                # When a Content-Length is not provided,
                # the server should close the connection.
                self.getPage("/stream", headers=[("Connection", "Keep-Alive")])
                self.assertStatus('200 OK')
                self.assertBody('0123456789')
                self.assertNoHeader("Content-Length")
                self.assertNoHeader("Connection", "Keep-Alive")
                self.assertNoHeader("Transfer-Encoding")

                # Make another request on the same connection, which should
                # error.
                self.assertRaises(NotConnected, self.getPage, "/")

    def test_HTTP10_KeepAlive(self):
        self.PROTOCOL = "HTTP/1.0"
        if self.scheme == "https":
            self.HTTP_CONN = HTTPSConnection
        else:
            self.HTTP_CONN = HTTPConnection

        # Test a normal HTTP/1.0 request.
        self.getPage("/page2")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        # Apache, for example, may emit a Connection header even for HTTP/1.0
        ## self.assertNoHeader("Connection")

        # Test a keep-alive HTTP/1.0 request.
        self.persistent = True

        self.getPage("/page3", headers=[("Connection", "Keep-Alive")])
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertHeader("Connection", "Keep-Alive")

        # Remove the keep-alive header again.
        self.getPage("/page3")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        # Apache, for example, may emit a Connection header even for HTTP/1.0
        ## self.assertNoHeader("Connection")
class PipelineTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_HTTP11_Timeout(self):
# If we timeout without sending any data,
# the server will close the conn with a 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Connect but send nothing.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
# Wait for our socket timeout
time.sleep(timeout * 2)
# The request should have returned 408 already.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
# Connect but send half the headers only.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
conn.send(ntob('GET /hello HTTP/1.1'))
conn.send(("Host: %s" % self.HOST).encode('ascii'))
# Wait for our socket timeout
time.sleep(timeout * 2)
# The conn should have already sent 408.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
def test_HTTP11_Timeout_after_request(self):
# If we timeout after at least one request has succeeded,
# the server will close the conn without 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Make an initial request
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/timeout?t=%s" % timeout, skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(str(timeout))
# Make a second request on the same socket
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody("Hello, world!")
# Wait for our socket timeout
time.sleep(timeout * 2)
# Make another request on the same socket, which should error
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
if response.status != 408:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Make another request on a new socket, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
# Make another request on the same socket,
# but timeout on the headers
conn.send(ntob('GET /hello HTTP/1.1'))
# Wait for our socket timeout
time.sleep(timeout * 2)
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Retry the request on a new connection, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
conn.close()
def test_HTTP11_pipelining(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Test pipelining. httplib doesn't support this directly.
self.persistent = True
conn = self.HTTP_CONN
# Put request 1
conn.putrequest("GET", "/hello", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
for trial in range(5):
# Put next request
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
# Retrieve previous response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read(13)
self.assertEqual(response.status, 200)
self.assertEqual(body, ntob("Hello, world!"))
# Retrieve final response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, ntob("Hello, world!"))
conn.close()
    def test_100_Continue(self):
        """Exercise HTTP/1.1 100-Continue handling.

        Without an Expect header the server must not emit a 100 interim
        response; with ``Expect: 100-continue`` it must emit a bare 100
        (no headers) and then process the body normally.
        """
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        self.persistent = True
        conn = self.HTTP_CONN
        # Try a page without an Expect request header first.
        # Note that httplib's response.begin automatically ignores
        # 100 Continue responses, so we must manually check for it.
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Content-Type", "text/plain")
        conn.putheader("Content-Length", "4")
        conn.endheaders()
        conn.send(ntob("d'oh"))
        response = conn.response_class(conn.sock, method="POST")
        # _read_status is used (instead of begin) so a 100 would be visible.
        version, status, reason = response._read_status()
        self.assertNotEqual(status, 100)
        conn.close()
        # Now try a page with an Expect header...
        conn.connect()
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Content-Type", "text/plain")
        conn.putheader("Content-Length", "17")
        conn.putheader("Expect", "100-continue")
        conn.endheaders()
        response = conn.response_class(conn.sock, method="POST")
        # ...assert and then skip the 100 response
        version, status, reason = response._read_status()
        self.assertEqual(status, 100)
        while True:
            line = response.fp.readline().strip()
            if line:
                self.fail("100 Continue should not output any headers. Got %r" % line)
            else:
                break
        # ...send the body
        body = ntob("I am a small file")
        conn.send(body)
        # ...get the final response
        response.begin()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(200)
        self.assertBody("thanks for '%s'" % body)
        conn.close()
class ConnectionTests(helper.CPWebCase):
    """Persistent-connection behavior: request-body draining, bodiless
    status codes, chunked transfer coding, and Content-Length enforcement
    on both requests and responses."""

    setup_server = staticmethod(setup_server)

    def test_readall_or_close(self):
        """The server must consume (or close past) an unread request body
        so the connection stays usable, including after an early error
        response, and for both max_request_body_size settings."""
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        if self.scheme == "https":
            self.HTTP_CONN = HTTPSConnection
        else:
            self.HTTP_CONN = HTTPConnection
        # Test a max of 0 (the default) and then reset to what it was above.
        old_max = cherrypy.server.max_request_body_size
        for new_max in (0, old_max):
            cherrypy.server.max_request_body_size = new_max
            self.persistent = True
            conn = self.HTTP_CONN
            # Get a POST page with an error
            conn.putrequest("POST", "/err_before_read", skip_host=True)
            conn.putheader("Host", self.HOST)
            conn.putheader("Content-Type", "text/plain")
            conn.putheader("Content-Length", "1000")
            conn.putheader("Expect", "100-continue")
            conn.endheaders()
            response = conn.response_class(conn.sock, method="POST")
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            conn.send(ntob("x" * 1000))
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(500)
            # Now try a working page with an Expect header...
            # (written by hand on the same socket to prove it survived)
            conn._output(ntob('POST /upload HTTP/1.1'))
            conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
            conn._output(ntob("Content-Type: text/plain"))
            conn._output(ntob("Content-Length: 17"))
            conn._output(ntob("Expect: 100-continue"))
            conn._send_output()
            response = conn.response_class(conn.sock, method="POST")
            # ...assert and then skip the 100 response
            version, status, reason = response._read_status()
            self.assertEqual(status, 100)
            while True:
                skip = response.fp.readline().strip()
                if not skip:
                    break
            # ...send the body
            body = ntob("I am a small file")
            conn.send(body)
            # ...get the final response
            response.begin()
            self.status, self.headers, self.body = webtest.shb(response)
            self.assertStatus(200)
            self.assertBody("thanks for '%s'" % body)
            conn.close()

    def test_No_Message_Body(self):
        """204 and 304 responses must carry no body and no Content-Length,
        and must not force the connection closed."""
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        # Make the first request and assert there's no "Connection: close".
        self.getPage("/")
        self.assertStatus('200 OK')
        self.assertBody(pov)
        self.assertNoHeader("Connection")
        # Make a 204 request on the same connection.
        self.getPage("/custom/204")
        self.assertStatus(204)
        self.assertNoHeader("Content-Length")
        self.assertBody("")
        self.assertNoHeader("Connection")
        # Make a 304 request on the same connection.
        self.getPage("/custom/304")
        self.assertStatus(304)
        self.assertNoHeader("Content-Length")
        self.assertBody("")
        self.assertNoHeader("Connection")

    def test_Chunked_Encoding(self):
        """Chunked request bodies (with chunk extensions and trailers) must
        be decoded; an oversized chunked body must be rejected with 413."""
        if cherrypy.server.protocol_version != "HTTP/1.1":
            return self.skip()
        if (hasattr(self, 'harness') and
                "modpython" in self.harness.__class__.__name__.lower()):
            # mod_python forbids chunked encoding
            return self.skip()
        self.PROTOCOL = "HTTP/1.1"
        # Set our HTTP_CONN to an instance so it persists between requests.
        self.persistent = True
        conn = self.HTTP_CONN
        # Try a normal chunked request (with extensions)
        body = ntob("8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n"
                    "Content-Type: application/json\r\n"
                    "\r\n")
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Transfer-Encoding", "chunked")
        conn.putheader("Trailer", "Content-Type")
        # Note that this is somewhat malformed:
        # we shouldn't be sending Content-Length.
        # RFC 2616 says the server should ignore it.
        conn.putheader("Content-Length", "3")
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus('200 OK')
        self.assertBody("thanks for '%s'" % ntob('xx\r\nxxxxyyyyy'))
        # Try a chunked request that exceeds server.max_request_body_size.
        # Note that the delimiters and trailer are included.
        body = ntob("3e3\r\n" + ("x" * 995) + "\r\n0\r\n\r\n")
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Transfer-Encoding", "chunked")
        conn.putheader("Content-Type", "text/plain")
        # Chunked requests don't need a content-length
        ## conn.putheader("Content-Length", len(body))
        conn.endheaders()
        conn.send(body)
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        conn.close()

    def test_Content_Length_in(self):
        """A declared request Content-Length above the server maximum is
        rejected with 413 before any body bytes are sent."""
        # Try a non-chunked request where Content-Length exceeds
        # server.max_request_body_size. Assert error before body send.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("POST", "/upload", skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.putheader("Content-Type", "text/plain")
        conn.putheader("Content-Length", "9999")
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(413)
        self.assertBody("The entity sent with the request exceeds "
                        "the maximum allowed bytes.")
        conn.close()

    def test_Content_Length_out_preheaders(self):
        """A handler producing more bytes than its declared Content-Length
        (detected before headers are flushed) yields a 500."""
        # Try a non-chunked response where Content-Length is less than
        # the actual bytes in the response body.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("GET", "/custom_cl?body=I+have+too+many+bytes&cl=5",
                        skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(500)
        self.assertBody(
            "The requested resource returned more bytes than the "
            "declared Content-Length.")
        conn.close()

    def test_Content_Length_out_postheaders(self):
        """Once headers are flushed, extra output beyond the declared
        Content-Length is truncated (200 with the first 5 bytes only)."""
        # Try a non-chunked response where Content-Length is less than
        # the actual bytes in the response body.
        self.persistent = True
        conn = self.HTTP_CONN
        conn.putrequest("GET", "/custom_cl?body=I+too&body=+have+too+many&cl=5",
                        skip_host=True)
        conn.putheader("Host", self.HOST)
        conn.endheaders()
        response = conn.getresponse()
        self.status, self.headers, self.body = webtest.shb(response)
        self.assertStatus(200)
        self.assertBody("I too")
        conn.close()

    def test_598(self):
        """Regression test (presumably CherryPy ticket #598, per the name):
        a client that pauses mid-download of a 1 MB response must still
        receive every byte despite the server-side timeout."""
        remote_data_conn = urlopen('%s://%s:%s/one_megabyte_of_a/' %
                                   (self.scheme, self.HOST, self.PORT,))
        buf = remote_data_conn.read(512)
        # Pause for a sizable fraction of the server timeout before
        # draining the rest of the body.
        time.sleep(timeout * 0.6)
        remaining = (1024 * 1024) - 512
        while remaining:
            data = remote_data_conn.read(remaining)
            if not data:
                break
            else:
                buf += data
                remaining -= len(data)
        self.assertEqual(len(buf), 1024 * 1024)
        self.assertEqual(buf, ntob("a" * 1024 * 1024))
        self.assertEqual(remaining, 0)
        remote_data_conn.close()
class BadRequestTests(helper.CPWebCase):
    """Tests for the server's rejection of malformed requests."""

    setup_server = staticmethod(setup_server)

    def test_No_CRLF(self):
        """Requests whose request line or header block is terminated by a
        bare LF must be rejected with an explanatory body."""
        self.persistent = True
        conn = self.HTTP_CONN
        # First request uses LF-only terminators throughout; the second
        # ends the request line correctly but closes the header block
        # with a bare LF.
        malformed_requests = (ntob('GET /hello HTTP/1.1\n\n'),
                              ntob('GET /hello HTTP/1.1\r\n\n'))
        for index, raw in enumerate(malformed_requests):
            if index:
                # The previous socket was closed; open a fresh one.
                conn.connect()
            conn.send(raw)
            response = conn.response_class(conn.sock, method="GET")
            response.begin()
            self.body = response.read()
            self.assertBody("HTTP requires CRLF terminators")
            conn.close()
|
2014c2g2/w16b_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/xml/dom/__init__.py | 873 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    __slots__ = ()

    # DOM implementations may use this as a base class for their own
    # Node implementations. If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation. Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.

    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
#ExceptionCode
# Numeric codes from the W3C DOM recommendation; each DOMException
# subclass below stores its matching code in a ``code`` class attribute.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Exceptions with specific codes are specializations of this class."""

    def __init__(self, *args, **kw):
        # The base class is abstract: only concrete subclasses (which
        # each define a ``code`` class attribute) may be constructed.
        if self.__class__ is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        Exception.__init__(self, *args, **kw)

    def _get_code(self):
        # Accessor for the subclass-provided exception code.
        return self.code
# One concrete DOMException specialization per ExceptionCode constant
# above; each carries its W3C-defined numeric code as a class attribute.

class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR

class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR

class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR

class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR

class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR

class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR

class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR

class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR

class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR

class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR

class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR

class SyntaxErr(DOMException):
    code = SYNTAX_ERR

class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR

class NamespaceErr(DOMException):
    code = NAMESPACE_ERR

class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR

class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002)

    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs used throughout the DOM implementations.
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None

# Re-export the DOM registry helpers at package level.
from .domreg import getDOMImplementation, registerDOMImplementation
|
shsingh/ansible | refs/heads/devel | lib/ansible/modules/network/aireos/aireos_config.py | 6 | #!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: maturity status and support owner.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = """
---
module: aireos_config
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Manage Cisco WLC configurations
description:
- AireOS does not use a block indent file syntax, so there are no sections or parents.
This module provides an implementation for working with AireOS configurations in
a deterministic way.
extends_documentation_fragment: aireos
options:
lines:
description:
- The ordered set of commands that should be configured.
The commands must be the exact same commands as found
in the device run-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line.
If match is set to I(none), the module will not attempt to
compare the source configuration with the running
configuration on the remote device.
default: line
choices: ['line', 'none']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(running_config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
aliases: ['config']
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed. This argument is mutually exclusive with I(save_when).
- This option is deprecated as of Ansible 2.7, use C(save_when)
type: bool
default: 'no'
save_when:
description:
- When changes are made to the device running-configuration, the
changes are not copied to non-volatile storage by default. Using
this argument will change that. If the argument is set to
I(always), then the running-config will always be copied to the
startup-config and the module will always return as changed.
If the argument is set to I(never), the running-config will never
be copied to the startup-config. If the argument is set to I(changed),
then the running-config will only be copied to the startup-config if
the task has made a change.
default: never
choices: ['always', 'never', 'changed']
version_added: "2.7"
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
choices: ['intended', 'running']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that
the node should conform to and is used to check the final
running-config against. This argument will not modify any settings
on the remote device and is strictly used to check the compliance
of the current device's configuration against. When specifying this
argument, the task should also modify the C(diff_against) value and
set it to I(intended).
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure configuration
aireos_config:
lines: sysname testDevice
- name: diff the running-config against a provided config
aireos_config:
diff_against: intended
    intended_config: "{{ lookup('file', 'master.cfg') }}"
- name: load new acl into device
aireos_config:
lines:
- acl create testACL
- acl rule protocol testACL 1 any
- acl rule direction testACL 3 in
before: acl delete testACL
- name: configurable backup path
aireos_config:
backup: yes
lines: sysname testDevice
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['hostname foo', 'vlan 1', 'name default']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/aireos_config.2016-07-16@22:28:34
"""
from ansible.module_utils.network.aireos.aireos import run_commands, get_config, load_config
from ansible.module_utils.network.aireos.aireos import aireos_argument_spec
from ansible.module_utils.network.aireos.aireos import check_args as aireos_check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig, dumps
def get_running_config(module, config=None):
    """Return the device running-config as a NetworkConfig object.

    Resolution order: the user-supplied ``running_config`` module
    parameter, then the *config* argument, and finally a fresh fetch
    from the device via get_config().
    """
    contents = module.params['running_config']
    if not contents:
        contents = config if config else get_config(module)
    return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
    """Build the candidate configuration from ``src`` or ``lines``.

    ``src`` (a config blob/template) takes precedence over the ``lines``
    list; an empty NetworkConfig is returned when neither is given.
    """
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        candidate.add(module.params['lines'])
    return candidate
def save_config(module, result):
    """Copy running-config to startup-config, answering the confirm prompt.

    Always marks the result as changed; in check mode only a warning is
    emitted and nothing is sent to the device.
    """
    result['changed'] = True
    if module.check_mode:
        module.warn('Skipping command `save config` due to check_mode. Configuration not copied to '
                    'non-volatile storage')
        return
    save_cmd = {"command": "save config", "prompt": "Are you sure you want to save", "answer": "y"}
    run_commands(module, save_cmd)
def main():
    """Main entry point for module execution.

    Builds the argument spec, optionally backs up the device config,
    computes and pushes any required configuration lines, saves the
    running-config per ``save``/``save_when``, and reports a diff when
    requested.
    """
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'none']),
        running_config=dict(aliases=['config']),
        intended_config=dict(),
        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        # save is deprecated as of 2.7, use save_when instead
        save=dict(type='bool', default=False, removed_in_version='2.11'),
        save_when=dict(choices=['always', 'never', 'changed'], default='never'),
        diff_against=dict(choices=['running', 'intended']),
        diff_ignore_lines=dict(type='list')
    )
    argument_spec.update(aireos_argument_spec)
    mutually_exclusive = [('lines', 'src'),
                          ('save', 'save_when')]
    required_if = [('diff_against', 'intended', ['intended_config'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    aireos_check_args(module, warnings)
    result = {'changed': False, 'warnings': warnings}
    config = None
    # Fetch the current config once if needed for a backup or for a
    # later diff against the running config.
    if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
        contents = get_config(module)
        config = NetworkConfig(indent=1, contents=contents)
        if module.params['backup']:
            result['__backup__'] = contents
    if any((module.params['src'], module.params['lines'])):
        match = module.params['match']
        candidate = get_candidate(module)
        if match != 'none':
            # Push only the lines that differ from the running config.
            config = get_running_config(module, config)
            configobjs = candidate.difference(config, match=match)
        else:
            # match=none: push the candidate verbatim.
            configobjs = candidate.items
        if configobjs:
            commands = dumps(configobjs, 'commands').split('\n')
            if module.params['before']:
                commands[:0] = module.params['before']
            if module.params['after']:
                commands.extend(module.params['after'])
            result['commands'] = commands
            result['updates'] = commands
            # Only push to the device outside check mode; 'changed' is
            # reported either way.
            if not module.check_mode:
                load_config(module, commands)
            result['changed'] = True
    diff_ignore_lines = module.params['diff_ignore_lines']
    # Note: save_when='always' (or the deprecated save=True) saves even
    # when nothing changed, which always reports changed=True.
    if module.params['save_when'] == 'always' or module.params['save']:
        save_config(module, result)
    elif module.params['save_when'] == 'changed' and result['changed']:
        save_config(module, result)
    if module._diff:
        output = run_commands(module, 'show run-config commands')
        contents = output[0]
        # recreate the object in order to process diff_ignore_lines
        running_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
        if module.params['diff_against'] == 'running':
            if module.check_mode:
                module.warn("unable to perform diff against running-config due to check mode")
                contents = None
            else:
                contents = config.config_text
        elif module.params['diff_against'] == 'intended':
            contents = module.params['intended_config']
        if contents is not None:
            base_config = NetworkConfig(indent=1, contents=contents, ignore_lines=diff_ignore_lines)
            # Compare by SHA1 so ignore_lines filtering is respected.
            if running_config.sha1 != base_config.sha1:
                result.update({
                    'changed': True,
                    'diff': {'before': str(base_config), 'after': str(running_config)}
                })
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
FreeAgent/djangoappengine-starter | refs/heads/master | django/contrib/admin/filterspecs.py | 148 | """
FilterSpec encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
from django.db import models
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext as _
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.contrib.admin.util import get_model_from_relation, \
reverse_field_path, get_limit_choices_to_from_path
import datetime
class FilterSpec(object):
    """Base class for admin changelist filters.

    Concrete subclasses register themselves with a predicate via
    ``FilterSpec.register``; ``FilterSpec.create`` walks the registry in
    registration order and instantiates the first spec whose predicate
    accepts the given model field.
    """
    filter_specs = []

    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        self.field = f
        self.params = params
        if field_path is not None:
            self.field_path = field_path
        elif isinstance(f, models.related.RelatedObject):
            # Reverse relations carry their accessor name in var_name.
            self.field_path = f.var_name
        else:
            self.field_path = f.name

    @classmethod
    def register(cls, test, factory):
        """Append a (predicate, factory) pair to the shared registry."""
        cls.filter_specs.append((test, factory))

    @classmethod
    def create(cls, f, request, params, model, model_admin, field_path=None):
        """Instantiate the first registered spec whose predicate matches."""
        for test, factory in cls.filter_specs:
            if test(f):
                return factory(f, request, params, model, model_admin,
                               field_path=field_path)

    def has_output(self):
        """Whether this filter should be rendered at all."""
        return True

    def choices(self, cl):
        """Yield one dict per filter link; subclasses must implement."""
        raise NotImplementedError()

    def title(self):
        """Heading shown above the filter's choice list."""
        return self.field.verbose_name

    def output(self, cl):
        """Render the filter as an HTML <h3> heading plus a <ul> of links."""
        fragments = []
        if self.has_output():
            fragments.append(_(u'<h3>By %s:</h3>\n<ul>\n') % escape(self.title()))
            for choice in self.choices(cl):
                css = choice['selected'] and ' class="selected"' or ''
                fragments.append(u'<li%s><a href="%s">%s</a></li>\n' %
                                 (css,
                                  iri_to_uri(choice['query_string']),
                                  choice['display']))
            fragments.append('</ul>\n\n')
        return mark_safe("".join(fragments))
class RelatedFilterSpec(FilterSpec):
    """Filter on a relation (ForeignKey, ManyToManyField, or reverse
    RelatedObject), listing every related object plus "All" and, for
    nullable relations, an empty choice."""

    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(RelatedFilterSpec, self).__init__(
            f, request, params, model, model_admin, field_path=field_path)

        other_model = get_model_from_relation(f)
        if isinstance(f, (models.ManyToManyField,
                          models.related.RelatedObject)):
            # no direct field on this model, get name from other model
            self.lookup_title = other_model._meta.verbose_name
        else:
            self.lookup_title = f.verbose_name # use field name
        rel_name = other_model._meta.pk.name
        # Lookups target the related model's primary key.
        self.lookup_kwarg = '%s__%s__exact' % (self.field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % (self.field_path)
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(
            self.lookup_kwarg_isnull, None)
        self.lookup_choices = f.get_choices(include_blank=False)

    def has_output(self):
        # Render only when there is more than one choice; a nullable
        # relation contributes an extra "empty" choice.
        if isinstance(self.field, models.related.RelatedObject) \
           and self.field.field.null or hasattr(self.field, 'rel') \
           and self.field.null:
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1

    def title(self):
        return self.lookup_title

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        # "All" clears both the exact and the isnull lookups.
        yield {'selected': self.lookup_val is None
                           and not self.lookup_val_isnull,
               'query_string': cl.get_query_string(
                   {},
                   [self.lookup_kwarg, self.lookup_kwarg_isnull]),
               'display': _('All')}
        for pk_val, val in self.lookup_choices:
            yield {'selected': self.lookup_val == smart_unicode(pk_val),
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg: pk_val},
                       [self.lookup_kwarg_isnull]),
                   'display': val}
        if isinstance(self.field, models.related.RelatedObject) \
           and self.field.field.null or hasattr(self.field, 'rel') \
           and self.field.null:
            # NULL relations are offered as a single "empty" choice.
            yield {'selected': bool(self.lookup_val_isnull),
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg_isnull: 'True'},
                       [self.lookup_kwarg]),
                   'display': EMPTY_CHANGELIST_VALUE}

FilterSpec.register(lambda f: (
    hasattr(f, 'rel') and bool(f.rel) or
    isinstance(f, models.related.RelatedObject)), RelatedFilterSpec)
class BooleanFieldFilterSpec(FilterSpec):
    """Filter on a BooleanField/NullBooleanField offering All/Yes/No
    choices, plus "Unknown" for NullBooleanField."""

    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(BooleanFieldFilterSpec, self).__init__(f, request, params, model,
                                                     model_admin,
                                                     field_path=field_path)
        self.lookup_kwarg = '%s__exact' % self.field_path
        self.lookup_kwarg2 = '%s__isnull' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)

    def title(self):
        return self.field.verbose_name

    def choices(self, cl):
        # Boolean lookups use the string values '1'/'0' in the URL.
        for k, v in ((_('All'), None), (_('Yes'), '1'), (_('No'), '0')):
            yield {'selected': self.lookup_val == v and not self.lookup_val2,
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg: v},
                       [self.lookup_kwarg2]),
                   'display': k}
        if isinstance(self.field, models.NullBooleanField):
            yield {'selected': self.lookup_val2 == 'True',
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg2: 'True'},
                       [self.lookup_kwarg]),
                   'display': _('Unknown')}

FilterSpec.register(lambda f: isinstance(f, models.BooleanField)
                    or isinstance(f, models.NullBooleanField),
                    BooleanFieldFilterSpec)
class ChoicesFilterSpec(FilterSpec):
    """Filter on a field declared with ``choices``, listing each choice's
    human-readable label."""

    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(ChoicesFilterSpec, self).__init__(f, request, params, model,
                                                model_admin,
                                                field_path=field_path)
        self.lookup_kwarg = '%s__exact' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)

    def choices(self, cl):
        yield {'selected': self.lookup_val is None,
               'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
               'display': _('All')}
        # flatchoices yields (db value, display label) pairs with any
        # optgroup nesting flattened.
        for k, v in self.field.flatchoices:
            yield {'selected': smart_unicode(k) == self.lookup_val,
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg: k}),
                   'display': v}

FilterSpec.register(lambda f: bool(f.choices), ChoicesFilterSpec)
class DateFieldFilterSpec(FilterSpec):
    """Filter on a DateField/DateTimeField with fixed ranges: any date,
    today, past 7 days, this month, this year."""

    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(DateFieldFilterSpec, self).__init__(f, request, params, model,
                                                  model_admin,
                                                  field_path=field_path)

        self.field_generic = '%s__' % self.field_path

        # All GET params targeting this field (e.g. field__year=2010).
        self.date_params = dict([(k, v) for k, v in params.items()
                                 if k.startswith(self.field_generic)])

        today = datetime.date.today()
        one_week_ago = today - datetime.timedelta(days=7)
        # For DateTimeFields, extend "today" to the end of the day so the
        # __lte bound includes today's records.
        today_str = isinstance(self.field, models.DateTimeField) \
                    and today.strftime('%Y-%m-%d 23:59:59') \
                    or today.strftime('%Y-%m-%d')

        self.links = (
            (_('Any date'), {}),
            (_('Today'), {'%s__year' % self.field_path: str(today.year),
                          '%s__month' % self.field_path: str(today.month),
                          '%s__day' % self.field_path: str(today.day)}),
            (_('Past 7 days'), {'%s__gte' % self.field_path:
                                    one_week_ago.strftime('%Y-%m-%d'),
                                '%s__lte' % self.field_path: today_str}),
            (_('This month'), {'%s__year' % self.field_path: str(today.year),
                               '%s__month' % self.field_path: str(today.month)}),
            (_('This year'), {'%s__year' % self.field_path: str(today.year)})
        )

    def title(self):
        return self.field.verbose_name

    def choices(self, cl):
        for title, param_dict in self.links:
            yield {'selected': self.date_params == param_dict,
                   'query_string': cl.get_query_string(
                       param_dict,
                       [self.field_generic]),
                   'display': title}

FilterSpec.register(lambda f: isinstance(f, models.DateField),
                    DateFieldFilterSpec)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldFilterSpec, that'd be much
# more appropriate, and the AllValuesFilterSpec won't get used for it.
class AllValuesFilterSpec(FilterSpec):
    """Last-resort filter listing every distinct value of the field.

    Queries the field's parent model for distinct values and renders one
    choice per value, plus "All" and (when NULLs exist) an empty choice.
    """
    def __init__(self, f, request, params, model, model_admin,
                 field_path=None):
        super(AllValuesFilterSpec, self).__init__(f, request, params, model,
                                                  model_admin,
                                                  field_path=field_path)
        self.lookup_kwarg = self.field_path
        self.lookup_kwarg_isnull = '%s__isnull' % self.field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
                                                 None)
        parent_model, reverse_path = reverse_field_path(model, self.field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices based on existing relationships
        # queryset = queryset.complex_filter(
        #     {'%s__isnull' % reverse_path: False})
        # Use self.field_path rather than the raw field_path argument:
        # the base class resolves the path when the argument is None, and
        # passing None here would break the lookup-path parsing.
        limit_choices_to = get_limit_choices_to_from_path(model,
                                                          self.field_path)
        queryset = queryset.filter(limit_choices_to)
        self.lookup_choices = \
            queryset.distinct().order_by(f.name).values_list(f.name, flat=True)

    def title(self):
        return self.field.verbose_name

    def choices(self, cl):
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {'selected': self.lookup_val is None
                           and self.lookup_val_isnull is None,
               'query_string': cl.get_query_string(
                   {},
                   [self.lookup_kwarg, self.lookup_kwarg_isnull]),
               'display': _('All')}
        include_none = False
        for val in self.lookup_choices:
            if val is None:
                # NULLs are collapsed into a single "empty" choice below.
                include_none = True
                continue
            val = smart_unicode(val)
            yield {'selected': self.lookup_val == val,
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg: val},
                       [self.lookup_kwarg_isnull]),
                   'display': val}
        if include_none:
            yield {'selected': bool(self.lookup_val_isnull),
                   'query_string': cl.get_query_string(
                       {self.lookup_kwarg_isnull: 'True'},
                       [self.lookup_kwarg]),
                   'display': EMPTY_CHANGELIST_VALUE}

FilterSpec.register(lambda f: True, AllValuesFilterSpec)
|
agrimaldi/metaseq | refs/heads/master | metaseq/tableprinter.py | 2 | def print_2x2_table(table, row_labels, col_labels, fmt="%d"):
    """
    Return (despite the name, nothing is printed) a ReST-formatted 2x2 table
    used for Fisher's exact test, with row, column, and grand totals added.
    :param table: The four cells of a 2x2 table: [r1c1, r1c2, r2c1, r2c2]
    :param row_labels: A length-2 list of row names
    :param col_labels: A length-2 list of column names
    :param fmt: printf-style format applied to every cell value
    """
    grand = sum(table)
    # Separate table into components and get row/col sums
    t11, t12, t21, t22 = table
    # Row sums, col sums, and grand total
    r1 = t11 + t12
    r2 = t21 + t22
    c1 = t11 + t21
    c2 = t12 + t22
    # Re-cast everything as the appropriate format
    t11, t12, t21, t22, c1, c2, r1, r2, grand = [
        fmt % i for i in [t11, t12, t21, t22, c1, c2, r1, r2, grand]]
    # Construct rows and columns the long way...
    rows = [
        [""] + col_labels + ['total'],
        [row_labels[0], t11, t12, r1],
        [row_labels[1], t21, t22, r2],
        ['total', c1, c2, grand],
    ]
    cols = [
        [row[0] for row in rows],
        [col_labels[0], t11, t21, c1],
        [col_labels[1], t12, t22, c2],
        ['total', r1, r2, grand],
    ]
    # Get max column width for each column; need this for nice justification
    widths = []
    for col in cols:
        widths.append(max(len(i) for i in col))
    # ReST-formatted header
    sep = ['=' * i for i in widths]
    # Construct the table one row at a time with nice justification
    s = []
    s.append(' '.join(sep))
    s.append(' '.join(i.ljust(j) for i, j in zip(rows[0], widths)))
    s.append(' '.join(sep))
    for row in rows[1:]:
        s.append(' '.join(i.ljust(j) for i, j in zip(row, widths)))
    # Trailing '\n' makes the joined string end with a final newline.
    s.append(' '.join(sep) + '\n')
    return "\n".join(s)
def print_row_perc_table(table, row_labels, col_labels):
    """
    Render the 2x2 table with each cell expressed as a fraction of its row
    total instead of a raw count.
    """
    a, b, c, d = [float(i) for i in table]
    top = a + b
    bottom = c + d
    # Normalize each cell by the sum of its own row.
    fractions = [a / top, b / top, c / bottom, d / bottom]
    rendered = print_2x2_table(fractions, row_labels, col_labels, fmt="%.2f")
    # Drop the column-totals row (line index 5), which is meaningless for
    # row-wise fractions.
    lines = rendered.splitlines(True)
    return ''.join(lines[:5] + lines[6:])
def print_col_perc_table(table, row_labels, col_labels):
    """
    Render the 2x2 table with each cell expressed as a fraction of its
    column total instead of a raw count.
    """
    a, b, c, d = [float(i) for i in table]
    left = a + c
    right = b + d
    # Normalize each cell by the sum of its own column.
    fractions = [a / left, b / right, c / left, d / right]
    rendered = print_2x2_table(fractions, row_labels, col_labels, fmt="%.2f")
    # Trim the trailing row-totals column: cut every line at the position of
    # the last space in the header separator.
    lines = rendered.splitlines(False)
    cut = lines[0].rindex(" ")
    return '\n'.join(line[:cut] for line in lines)
def table_maker(subset, ind1, ind2, row_labels, col_labels, title):
    """
    Build a 2x2 contingency table from boolean index arrays and print the
    raw counts, row/column percentage views, and a Fisher exact p-value.
    `subset` provides a subsetted boolean of items to consider. If no subset,
    you can use all with `np.ones_like(ind1) == 1`
    `ind1` is used to subset rows, e.g., log2fc > 0. This is used for rows, so
    row_label might be ['upregulated', 'others']
    `ind2` is used to subset cols. For example, col_labels would be
    ['bound', 'unbound']
    """
    # Counts for the four cells: (ind1, ind2), (ind1, ~ind2), etc.
    table = [
        sum(subset & ind1 & ind2),
        sum(subset & ind1 & ~ind2),
        sum(subset & ~ind1 & ind2),
        sum(subset & ~ind1 & ~ind2)
    ]
    print
    print title
    print '-' * len(title)
    print print_2x2_table(table, row_labels=row_labels, col_labels=col_labels)
    print print_row_perc_table(
        table, row_labels=row_labels, col_labels=col_labels)
    print print_col_perc_table(
        table, row_labels=row_labels, col_labels=col_labels)
    # NOTE(review): `fisher` is never imported in this module -- presumably
    # the third-party `fisher` package; verify the import exists at runtime.
    print fisher.pvalue(*table)
if __name__ == "__main__":
    # Self-test: render a known 2x2 table and compare it against the expected
    # ReST layout. Lines are compared rstripped so the check is not brittle
    # to the trailing padding that ljust() emits on numeric cells.
    table = [12, 5, 29, 2]
    s = print_2x2_table(
        table,
        row_labels=['Selected', 'Not selected'],
        col_labels=['Having the property', 'Not having the property']
    )
    expected_lines = [
        "============ =================== ======================= =====",
        "             Having the property Not having the property total",
        "============ =================== ======================= =====",
        "Selected     12                  5                       17",
        "Not selected 29                  2                       31",
        "total        41                  7                       48",
        "============ =================== ======================= =====",
    ]
    # Single-argument print(...) works identically on Python 2 and 3.
    print(s)
    assert [line.rstrip() for line in s.splitlines()] == expected_lines
|
nguyenquach/pychess | refs/heads/master | lib/pychess/System/protoopen.py | 22 | import os
from pychess.compat import open, urlopen, url2pathname
PGN_ENCODING = "latin_1"
def splitUri (uri):
    """Decode *uri*, strip CR/LF/NUL from its ends, and split on '://'.

    Returns [protocol, rest] for protocol-style URIs, or a single-element
    list [path] for plain filesystem paths.
    """
    cleaned = url2pathname(uri)          # decode %xx escapes
    cleaned = cleaned.strip('\r\n\x00')  # drop CR/LF and NUL padding
    return cleaned.split("://")
def protoopen (uri):
    """Open *uri* as a local file if possible, otherwise as a URL.

    Raises IOError when neither interpretation succeeds.
    """
    # Try each opener in order: local file first, then network URL.
    for opener in (lambda u: open(u, "rU", encoding=PGN_ENCODING), urlopen):
        try:
            return opener(uri)
        except (IOError, OSError):
            continue
    raise IOError("Protocol isn't supported by pychess")
def protosave (uri, append=False):
    """ Function for saving many things.

    Opens *uri* for writing (or appending when `append` is True) and returns
    the file object. Only 'file://' URIs and plain paths are supported;
    anything else raises IOError.
    """
    splitted = splitUri(uri)
    if splitted[0] == "file":
        if append:
            return open(splitted[1], "a", encoding=PGN_ENCODING)
        # Bug fix: this branch previously omitted encoding=PGN_ENCODING and
        # therefore wrote with the platform default encoding, unlike every
        # other open() in this module.
        return open(splitted[1], "w", encoding=PGN_ENCODING)
    elif len(splitted) == 1:
        if append:
            return open(splitted[0], "a", encoding=PGN_ENCODING)
        return open(splitted[0], "w", encoding=PGN_ENCODING)
    raise IOError("PyChess doesn't support writing to protocol")
def isWriteable (uri):
    """ Returns true if protoopen can open a write pipe to the uri """
    parts = splitUri(uri)
    if parts[0] == "file":
        # Explicit file:// URI -- check the path component.
        return os.access(parts[1], os.W_OK)
    if len(parts) == 1:
        # Plain filesystem path.
        return os.access(parts[0], os.W_OK)
    # Any other protocol is never writable.
    return False
|
lizardsystem/lizard-reportgenerator | refs/heads/master | lizard_reportgenerator/admin.py | 1 | from django.contrib import admin
from lizard_reportgenerator.models import GeneratedReport
from lizard_reportgenerator.models import ReportTemplate
from lizard_security.admin import SecurityFilteredAdmin
class GeneratedReportInline(admin.TabularInline):
    # Tabular inline so GeneratedReport rows can be edited from a parent
    # model's admin page.
    model = GeneratedReport
class ReportTemplateAdmin(admin.ModelAdmin):
    # Changelist columns: template identity, how it is generated, and which
    # output formats it supports.
    list_display = ('name',
                    'slug',
                    'kind',
                    'generation_module',
                    'generation_function',
                    'extra_arguments',
                    'rtf_support',
                    'doc_support',
                    'pdf_support',
                    'csv_support',
                    'xls_support')
class GeneratedReportAdmin(SecurityFilteredAdmin):
    # SecurityFilteredAdmin base restricts visible rows via lizard_security.
    list_display = ('doc_document',
                    'pdf_document',
                    'xls_document',
                    'csv_document',
                    'template',
                    'area',
                    'generated_on',
                    'visible')
# Register both models with their customized admin classes.
admin.site.register(ReportTemplate, ReportTemplateAdmin)
admin.site.register(GeneratedReport, GeneratedReportAdmin)
|
adampresley/sublime-debugkiller | refs/heads/master | DebugKillerCommand.py | 1 | #
# History:
#
# 2013-03-09:
# -
#
# 2013-03-08:
# - Initial release
#
import os
import re
import sublime
import sublime_plugin
OUTPUT_VIEW_NAME = "debugkiller_result_view"
class DebugKillerCommand(sublime_plugin.WindowCommand):
    """Scan the active file for debug statements (per configured regex
    patterns) and list the hits in an output panel."""
    def run(self):
        self.view = self.window.active_view()
        self.filePath = self.view.file_name()
        self.fileName = os.path.basename(self.filePath)
        # Syntax scope names at the cursor, used to pick applicable patterns.
        self.scope = self.view.syntax_name(self.view.sel()[0].b).strip().split(" ")
        self.settings = sublime.load_settings("DebugKiller.sublime-settings")
        self.projectSettings = self.view.settings().get("sublime-debugkiller")
        self.allPatterns = self.settings.get("patterns")
        self.patterns = []
        # Project-level settings extend the globally configured patterns.
        if self.projectSettings:
            print "Project settings found. Loading %s pattern(s)..." % len(self.projectSettings["patterns"])
            for p in self.projectSettings["patterns"]:
                self.allPatterns.append(p)
        #
        # Filter our patterns by our scope
        #
        for p in self.allPatterns:
            for s in p["scopes"]:
                if s in self.scope:
                    self.patterns.append(p)
                    break
        print ""
        print "All patterns: %s" % self.allPatterns
        print "Scope: %s" % self.scope
        print "Patterns: %s" % self.patterns
        print ""
        #
        # Configure the output view, perform operation search and destroy
        #
        self.configureOutputView()
        self.searchAndDestroy(self.filePath)
        self.showView()
    def searchAndDestroy(self, file):
        # Scan `file` line by line and report every pattern match into the
        # output panel as "Target found: line:col : matched-text".
        print "Debug Killer initializing %s directive%s..." % (len(self.patterns), "s" if len(self.patterns) > 1 else "")
        self.writeToView("Debug Killer initializing %s directives...\n\n" % len(self.patterns))
        lineNum = 0
        f = open(file, "r")
        #
        # Search each line for patterns of items we wish to target
        #
        for line in f:
            lineNum += 1
            for pattern in self.patterns:
                for match in re.finditer(pattern["pattern"], line, re.IGNORECASE):
                    msg = "Target found: %s:%s : %s" % (lineNum, match.start(), match.group(0))
                    self.writeToView(msg + "\n")
        f.close()
        self.writeToView("\n>> Objective complete. Click on the items above to highlight the located line\n")
    def configureOutputView(self):
        # Lazily create the output panel, clear it, and remember which file
        # the results belong to (read back by the event listener below).
        if not hasattr(self, "outputView"):
            self.outputView = self.window.get_output_panel(OUTPUT_VIEW_NAME)
            self.outputView.set_name(OUTPUT_VIEW_NAME)
        self.clearView()
        self.outputView.settings().set("file_path", self.filePath)
    def clearView(self):
        # Panel is kept read-only; temporarily unlock it to erase contents.
        self.outputView.set_read_only(False)
        edit = self.outputView.begin_edit()
        self.outputView.erase(edit, sublime.Region(0, self.outputView.size()))
        self.outputView.end_edit(edit)
        self.outputView.set_read_only(True)
    def writeToView(self, msg):
        # Append `msg` to the panel, toggling read-only around the edit.
        self.outputView.set_read_only(False)
        edit = self.outputView.begin_edit()
        self.outputView.insert(edit, self.outputView.size(), msg)
        self.outputView.end_edit(edit)
        self.outputView.set_read_only(True)
    def showView(self):
        # Reveal the results panel at the bottom of the window.
        self.window.run_command("show_panel", { "panel": "output." + OUTPUT_VIEW_NAME })
class FindConsoleLogEventListener(sublime_plugin.EventListener):
    """When the user clicks a result line in the DebugKiller panel, jump to
    and highlight the corresponding line in the source file."""
    disabled = False
    def __init__(self):
        self.previousInstance = None
        self.fileView = None
    def on_selection_modified(self, view):
        if FindConsoleLogEventListener.disabled:
            return
        # Only react to selections inside our own results panel.
        if view.name() != OUTPUT_VIEW_NAME:
            return
        region = view.line(view.sel()[0])
        #
        # Make sure call once.
        #
        if self.previousInstance == region:
            return
        self.previousInstance = region
        #
        # Extract line from console result.
        #
        # Result lines look like "Target found: <line>:<col> : <text>".
        text = view.substr(region).split(":")
        if len(text) < 3:
            return
        #
        # Highlight the selected line
        #
        line = text[1].strip()
        col = text[2].strip()
        view.add_regions(OUTPUT_VIEW_NAME, [ region ], "comment")
        #
        # Find the file view.
        #
        # The command stored the scanned file's path on the panel settings.
        filePath = view.settings().get("file_path")
        window = sublime.active_window()
        fileView = None
        for v in window.views():
            if v.file_name() == filePath:
                fileView = v
                break
        if fileView == None:
            return
        self.fileView = fileView
        window.focus_view(fileView)
        fileView.run_command("goto_line", {"line": line})
        fileRegion = fileView.line(fileView.sel()[0])
        #
        # Highlight fileView line
        #
        fileView.add_regions(OUTPUT_VIEW_NAME, [ fileRegion ], "string")
    def on_deactivated(self, view):
        # Leaving the panel clears the highlight in the source view.
        if view.name() != OUTPUT_VIEW_NAME:
            return
        self.previousInstance = None
        if self.fileView:
            self.fileView.erase_regions(OUTPUT_VIEW_NAME)
|
peak6/st2 | refs/heads/master | st2common/st2common/models/db/runner.py | 5 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mongoengine as me
from st2common import log as logging
from st2common.models.db import MongoDBAccess
from st2common.models.db import stormbase
from st2common.constants.types import ResourceType
__all__ = [
    'RunnerTypeDB',
]
LOG = logging.getLogger(__name__)
# NOTE(review): PACK_SEPARATOR is unused within this module -- presumably
# imported from here by other modules; verify before removing.
PACK_SEPARATOR = '.'
class RunnerTypeDB(stormbase.StormBaseDB, stormbase.UIDFieldMixin):
    """
    The representation of an RunnerType in the system. An RunnerType
    has a one-to-one mapping to a particular ActionRunner implementation.
    Attributes:
        id: See StormBaseAPI
        name: See StormBaseAPI
        description: See StormBaseAPI
        enabled: A flag indicating whether the runner for this type is enabled.
        runner_module: The python module that implements the action runner for this type.
        runner_parameters: The specification for parameters for the action runner.
    """
    RESOURCE_TYPE = ResourceType.RUNNER_TYPE
    # UID is derived from the runner name alone (see UIDFieldMixin).
    UID_FIELDS = ['name']
    enabled = me.BooleanField(
        required=True, default=True,
        help_text='A flag indicating whether the runner for this type is enabled.')
    runner_module = me.StringField(
        required=True,
        help_text='The python module that implements the action runner for this type.')
    runner_parameters = me.DictField(
        help_text='The specification for parameters for the action runner.')
    query_module = me.StringField(
        required=False,
        help_text='The python module that implements the query module for this runner.')
    meta = {
        'indexes': stormbase.UIDFieldMixin.get_indexes()
    }
    def __init__(self, *args, **values):
        super(RunnerTypeDB, self).__init__(*args, **values)
        # Populate the uid field eagerly from the mixin's get_uid() so it is
        # set before the document is first saved.
        self.uid = self.get_uid()
# specialized access objects
runnertype_access = MongoDBAccess(RunnerTypeDB)
# List of model classes exposed by this module -- presumably consumed by
# model registration code elsewhere; verify before changing.
MODELS = [RunnerTypeDB]
|
ajose01/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/actions/gyptest-errors.py | 351 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies behavior for different action configuration errors:
exit status of 1, and the expected error message must be in stderr.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_errors')
# The gyp file declares an action without an 'action_name'; generation must
# fail with status 1 (stderr=None lets us inspect stderr ourselves below).
test.run_gyp('action_missing_name.gyp', chdir='src', status=1, stderr=None)
expect = [
    "Anonymous action in target broken_actions2. An action must have an 'action_name' field.",
]
test.must_contain_all_lines(test.stderr(), expect)
test.pass_test()
|
makinacorpus/formhub | refs/heads/master | odk_viewer/migrations/0004_auto__add_field_datadictionary_shared.py | 7 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the boolean `shared` column to
    odk_viewer.DataDictionary (default False)."""
    def forwards(self, orm):
        # Adding field 'DataDictionary.shared'
        db.add_column('odk_viewer_datadictionary', 'shared', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'DataDictionary.shared'
        db.delete_column('odk_viewer_datadictionary', 'shared')
    # Frozen ORM snapshot auto-generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.instance': {
            'Meta': {'object_name': 'Instance'},
            'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
            'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
            'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_logger.surveytype': {
            'Meta': {'object_name': 'SurveyType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'odk_logger.xform': {
            'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
            'xml': ('django.db.models.fields.TextField', [], {})
        },
        'odk_viewer.columnrename': {
            'Meta': {'object_name': 'ColumnRename'},
            'column_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'odk_viewer.datadictionary': {
            'Meta': {'object_name': 'DataDictionary'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'json': ('django.db.models.fields.TextField', [], {}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'xform': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'data_dictionary'", 'unique': 'True', 'to': "orm['odk_logger.XForm']"}),
            'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'})
        },
        'odk_viewer.instancemodification': {
            'Meta': {'object_name': 'InstanceModification'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modifications'", 'to': "orm['odk_logger.Instance']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'xpath': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'odk_viewer.parsedinstance': {
            'Meta': {'object_name': 'ParsedInstance'},
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'parsed_instance'", 'unique': 'True', 'to': "orm['odk_logger.Instance']"}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        }
    }
    complete_apps = ['odk_viewer']
|
numerical-mathematics/extrapolation | refs/heads/master | compare_test_dense.py | 1 | '''
Runs a dense output performance test comparing ParEx with
DOPRI5 and DOP853 integrators from scipy.integrate.ode
Result graphs are saved in the folder ./images
In this file only, we use a version of scipy.integrate.ode due to
James D. D. Martin (2015) that exposes the dense output option from the
Fortran code of DOPRI5 and DOP853 integrators. Please install
this version of scipy first before running this file.
James D. D. Martin version of scipy can be downloaded from
https://github.com/jddmartin/scipy/tree/dense_output_from_dopri5_and_dop853
Examples of the usage of dense output in scipy are in
https://github.com/jddmartin/dense_output_example_usage
you essentially need to run these commands:
$ sudo pip install cython numpy
$ sudo pip install git+https://github.com/jddmartin/scipy.git@dense_output_from_dopri5_and_dop853
if you want to revert back to the most recent version of scipy, run these commands
$ sudo pip uninstall scipy
$ sudo pip install scipy
'''
from __future__ import division
import numpy as np
import time
from scipy.integrate import ode, complex_ode, dense_dop
import ex_parallel as ex_p
import fnbod
class DenseSolout(object):
    """Callback object collecting dense-output samples at requested times.

    `dense_output` accumulates one state vector per entry of `ts` (plus the
    initial condition); `tindex` tracks the next requested time to fill.
    """
    def __init__(self, ts):
        self.dense_output = []
        self.tindex = 0
        self.ts = ts

    def solout(self, nr, told, t, y, con):
        if nr == 1:
            # Very first call: record the initial condition as-is.
            self.dense_output.append(y)
            self.tindex += 1
            return
        # Interpolate every requested output time covered by the completed
        # step [told, t] using the integrator's dense-output coefficients.
        while self.tindex < len(self.ts) and t >= self.ts[self.tindex]:
            self.dense_output.append(dense_dop(self.ts[self.tindex], told, t, con))
            self.tindex += 1
def relative_error(y, y_ref):
    """Return the 2-norm relative error ||y - y_ref|| / ||y_ref||."""
    diff_norm = np.linalg.norm(y - y_ref)
    ref_norm = np.linalg.norm(y_ref)
    return diff_norm / ref_norm
def compare_performance_dense(func, y0, t, y_ref, problem_name, tol_boundary=(0,6), is_complex=False, nsteps=10e5, solout=(lambda y,t: y)):
    """Benchmark ParEx against scipy's DOPRI5/DOP853 with dense output.

    func          -- right-hand side f(y, t) of the ODE system
    y0            -- initial state
    t             -- array of output times (t[0] start, t[-1] end)
    y_ref         -- reference solution at t[1:] used to measure error
    problem_name  -- label used in console output and the saved plot
    tol_boundary  -- (a, b) slice selecting which tolerances to run
    is_complex    -- use complex_ode instead of ode for the scipy runs
    nsteps        -- max internal steps allowed per integrator
    solout        -- post-processing applied to dense output before comparing
    """
    print 'RUNNING COMPARISON TEST FOR ' + problem_name
    tol = [1.e-3,1.e-5,1.e-7,1.e-9,1.e-11,1.e-13]
    a, b = tol_boundary
    tol = tol[a:b]
    t0, tf = t[0], t[-1]
    # Per-tolerance metrics for each integrator: runtime, sequential and
    # total function evaluations, relative error, and step count.
    py_runtime = np.zeros(len(tol))
    py_fe_seq = np.zeros(len(tol))
    py_fe_tot = np.zeros(len(tol))
    py_yerr = np.zeros(len(tol))
    py_nstp = np.zeros(len(tol))
    # NOTE(review): the dopri5_fe_* / dop853_fe_* / *_nstp arrays below are
    # never assigned, so those printed statistics are always zero -- confirm
    # whether this is intended.
    dopri5_runtime = np.zeros(len(tol))
    dopri5_fe_seq = np.zeros(len(tol))
    dopri5_fe_tot = np.zeros(len(tol))
    dopri5_yerr = np.zeros(len(tol))
    dopri5_nstp = np.zeros(len(tol))
    dop853_runtime = np.zeros(len(tol))
    dop853_fe_seq = np.zeros(len(tol))
    dop853_fe_tot = np.zeros(len(tol))
    dop853_yerr = np.zeros(len(tol))
    dop853_nstp = np.zeros(len(tol))
    # This is necessary because multiprocessing uses pickle, which can't handle
    # Fortran function pointers
    def func2(t,y):
        return func(y,t)
    for i in range(len(tol)):
        print 'Tolerance: ', tol[i]
        # run Python extrapolation code
        print 'running ParEx'
        start_time = time.time()
        y, infodict = ex_p.ex_midpoint_explicit_parallel(func, None, y0, t, atol=tol[i], rtol=tol[i], mxstep=nsteps, adaptive="order", full_output=True)
        py_runtime[i] = time.time() - start_time
        py_fe_seq[i], py_fe_tot[i], py_nstp[i] = infodict['fe_seq'], infodict['nfe'], infodict['nst']
        # Post-process the dense output (skip the initial condition at t[0]).
        y[1:] = solout(y[1:],t[1:])
        py_yerr[i] = relative_error(y[1:], y_ref)
        print 'Runtime: ', py_runtime[i], ' s Error: ', py_yerr[i], ' fe_seq: ', py_fe_seq[i], ' fe_tot: ', py_fe_tot[i], ' nstp: ', py_nstp[i]
        print ''
        # run DOPRI5 (scipy)
        print 'running DOPRI5 (scipy)'
        dopri5_d_solout = DenseSolout(t)
        start_time = time.time()
        if is_complex:
            r = complex_ode(func2).set_integrator('dopri5', atol=tol[i], rtol=tol[i], verbosity=10, nsteps=nsteps)
        else:
            r = ode(func2).set_integrator('dopri5', atol=tol[i], rtol=tol[i], verbosity=10, nsteps=nsteps)
        # dense_components requests interpolated values for every component.
        r.set_solout(dopri5_d_solout.solout, dense_components=tuple(range(len(y0))))
        r.set_initial_value(y0, t0)
        r.integrate(r.t+(tf-t0))
        assert r.t == tf, "Integration did not converge. Try increasing the max number of steps"
        dopri5_runtime[i] = time.time() - start_time
        y = np.array(dopri5_d_solout.dense_output)
        y[1:] = solout(y[1:],t[1:])
        dopri5_yerr[i] = relative_error(y[1:], y_ref)
        print 'Runtime: ', dopri5_runtime[i], ' s Error: ', dopri5_yerr[i], ' fe_seq: ', dopri5_fe_seq[i], ' fe_tot: ', dopri5_fe_tot[i], ' nstp: ', dopri5_nstp[i]
        print ''
        # run DOP853 (scipy)
        print 'running DOP853 (scipy)'
        dop853_d_solout = DenseSolout(t)
        start_time = time.time()
        if is_complex:
            r = complex_ode(func2, jac=None).set_integrator('dop853', atol=tol[i], rtol=tol[i], verbosity=10, nsteps=nsteps)
        else:
            r = ode(func2, jac=None).set_integrator('dop853', atol=tol[i], rtol=tol[i], verbosity=10, nsteps=nsteps)
        r.set_solout(dop853_d_solout.solout, dense_components=tuple(range(len(y0))))
        r.set_initial_value(y0, t0)
        r.integrate(r.t+(tf-t0))
        assert r.t == tf, "Integration did not converge. Try increasing the max number of steps"
        dop853_runtime[i] = time.time() - start_time
        y = np.array(dop853_d_solout.dense_output)
        y[1:] = solout(y[1:],t[1:])
        dop853_yerr[i] = relative_error(y[1:], y_ref)
        print 'Runtime: ', dop853_runtime[i], ' s Error: ', dop853_yerr[i], ' fe_seq: ', dop853_fe_seq[i], ' fe_tot: ', dop853_fe_tot[i], ' nstp: ', dop853_nstp[i]
        print ''
    print ''
    print "Final data: ParEx"
    print py_runtime, py_fe_seq, py_fe_tot, py_yerr, py_nstp
    print "Final data: DOPRI5 (scipy)"
    print dopri5_runtime, dopri5_fe_seq, dopri5_fe_tot, dopri5_yerr, dopri5_nstp
    print "Final data: DOP853 (scipy)"
    print dop853_runtime, dop853_fe_seq, dop853_fe_tot, dop853_yerr, dop853_nstp
    print ''
    print ''
    # plot performance graphs
    import matplotlib
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    plt.hold('true')
    py_line, = plt.loglog(py_yerr, py_runtime, "s-")
    dopri5_line, = plt.loglog(dopri5_yerr, dopri5_runtime, "s-")
    dop853_line, = plt.loglog(dop853_yerr, dop853_runtime, "s-")
    plt.legend([py_line, dopri5_line, dop853_line], ["ParEx", "DOPRI5 (scipy)", "DOP853 (scipy)"], loc=1)
    plt.xlabel('Error')
    plt.ylabel('Wall clock time (seconds)')
    plt.title(problem_name)
    # NOTE(review): plt.show() before savefig with the non-interactive 'agg'
    # backend is a no-op -- presumably left over from interactive use.
    plt.show()
    plt.savefig('images/' + problem_name + '_err_vs_time.png')
    plt.close()
###############################################################
###################### TEST PROBLEMS ##########################
###############################################################
###### N-Body Problem ######
def nbod_func(y,t):
    # Thin wrapper so the Fortran N-body right-hand side is callable with
    # the f(y, t) signature used throughout this benchmark.
    return fnbod.fnbod(y,t)
def nbod_problem_dense(num):
    """Dense-output benchmark for the N-body problem over `num` output times."""
    t0 = 0
    tf = 0.08
    t = np.linspace(t0, tf, num=num, endpoint=True)
    y0 = fnbod.init_fnbod(2400)
    # Precomputed reference solution on the same output grid (file on disk).
    y_ref = np.loadtxt("reference_nbod_dense.txt")
    compare_performance_dense(nbod_func, y0, t, y_ref, "nbod_problem_dense")
###### kdv Problem ######
def kdv_init(t0):
    """Initial condition for the KdV benchmark in transformed Fourier space.

    Returns U_hat = exp(-i*k^3*t0) * fft(u), i.e. the two-soliton initial
    profile u with the dispersive linear term integrated out exactly.
    """
    N = 256
    # list(...) keeps this concatenation valid on Python 3, where range()
    # returns a lazy sequence that cannot be added to a list; on Python 2
    # (where range() already returns a list) it is a harmless copy.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    E_ = np.exp(-1j * k**3 * t0)
    x = (2*np.pi/N)*np.arange(-int(N/2), int(N/2))
    # Two-soliton profile with amplitudes tied to A and B.
    A = 25; B = 16;
    u = 3*A**2/np.cosh(0.5*(A*(x+2.)))**2 + 3*B**2/np.cosh(0.5*(B*(x+1)))**2
    U_hat = E_*np.fft.fft(u)
    return U_hat
def kdv_func(U_hat, t):
    """Transformed right-hand side of the KdV equation in Fourier space.

    U_hat := exp(-i*k^3*t)*u_hat; only the nonlinear term remains after the
    integrating-factor transformation.
    """
    N = 256
    # list(...) keeps the concatenation valid on Python 3 as well (range()
    # is lazy there); identical behavior on Python 2.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    E = np.exp(1j * k**3 * t)
    E_ = np.exp(-1j * k**3 * t)
    g = -0.5j * E_ * k
    # Nonlinearity evaluated pseudo-spectrally: back to physical space,
    # square, forward transform again.
    return g*np.fft.fft(np.real(np.fft.ifft(E*U_hat))**2)
def kdv_solout_dense(U_hat, ts):
    """Map rows of transformed dense output back to physical space.

    Row i holds exp(-i*k^3*ts[i])*u_hat; multiplying by E = exp(i*k^3*ts[i])
    undoes the integrating factor, and the inverse FFT recovers u(x, ts[i]).
    Note: the result inherits U_hat's (complex) dtype via zeros_like even
    though only real data is stored -- preserved for compatibility.
    """
    y = np.zeros_like(U_hat)
    N = 256
    # k does not depend on t, so build it once outside the loop.
    # list(...) keeps the concatenation valid on Python 3 as well.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    for i in range(len(ts)):
        E = np.exp(1j * k**3 * ts[i])
        y[i] = np.squeeze(np.real(np.fft.ifft(E*U_hat[i])))
    return y
def kdv_problem_dense(num):
    """Dense-output benchmark for the KdV problem over `num` output times."""
    t0 = 0.
    tf = 0.003
    t = np.linspace(t0, tf, num=num, endpoint=True)
    y0 = kdv_init(t0)
    # Precomputed reference solution on the same output grid (file on disk).
    y_ref = np.loadtxt("reference_kdv_dense.txt")
    compare_performance_dense(kdv_func, y0, t, y_ref, "kdv_problem_dense", is_complex=True, solout=kdv_solout_dense)
###### Burgers' Problem ######
def burgers_init(t0):
    """Initial condition for the Burgers benchmark in transformed space.

    Returns U_hat = exp(epsilon*k^2*t0) * fft(u) for a half-domain sin^2
    profile, with the diffusion term integrated out exactly.
    """
    epsilon = 0.1   # diffusion coefficient (spelled 'epslison' originally)
    N = 64
    # list(...) keeps this concatenation valid on Python 3, where range()
    # is a lazy sequence; identical behavior on Python 2.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    E = np.exp(epsilon * k**2 * t0)
    x = (2*np.pi/N)*np.arange(-int(N/2), int(N/2))
    u = np.sin(x)**2 * (x<0.)
    # u = np.sin(x)**2
    U_hat = E*np.fft.fft(u)
    return U_hat
def burgers_func(U_hat, t):
    """Transformed right-hand side of Burgers' equation in Fourier space.

    U_hat := exp(epsilon*k^2*t)*u_hat; the stiff diffusion term has been
    integrated out, leaving only the nonlinear convection term.
    """
    epsilon = 0.1   # diffusion coefficient
    N = 64
    # list(...) keeps the concatenation valid on Python 3 as well.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    E = np.exp(epsilon * k**2 * t)
    E_ = np.exp(-epsilon * k**2 * t)
    g = -0.5j * E * k
    # Pseudo-spectral evaluation of the u*u_x nonlinearity.
    return g*np.fft.fft(np.real(np.fft.ifft(E_*U_hat))**2)
def burgers_solout_dense(U_hat, ts):
    """Map rows of transformed dense output back to physical space.

    Row i holds exp(epsilon*k^2*ts[i])*u_hat; multiplying by the inverse
    factor E_ and applying the inverse FFT recovers u(x, ts[i]).
    Note: the result inherits U_hat's (complex) dtype via zeros_like even
    though only real data is stored -- preserved for compatibility.
    """
    epsilon = 0.1   # diffusion coefficient
    N = 64
    y = np.zeros_like(U_hat)
    # k does not depend on t, so build it once outside the loop.
    # list(...) keeps the concatenation valid on Python 3 as well.
    k = np.array(list(range(0, int(N/2))) + [0] + list(range(-int(N/2)+1, 0)))
    for i in range(len(ts)):
        E_ = np.exp(-epsilon * k**2 * ts[i])
        y[i] = np.squeeze(np.real(np.fft.ifft(E_*U_hat[i])))
    return y
def burgers_problem_dense(num):
    """Dense-output benchmark for Burgers' equation over `num` output times."""
    t0 = 0.
    tf = 3.
    t = np.linspace(t0, tf, num=num, endpoint=True)
    y0 = burgers_init(t0)
    # Precomputed reference solution on the same output grid (file on disk).
    y_ref = np.loadtxt("reference_burgers_dense.txt")
    compare_performance_dense(burgers_func, y0, t, y_ref, "burgers_problem_dense", tol_boundary=(0,4), nsteps=10e4, is_complex=True, solout=burgers_solout_dense)
########### RUN TESTS ###########
if __name__ == "__main__":
    # Run all three dense-output benchmarks with 50 output points each.
    num = 50
    nbod_problem_dense(num)
    kdv_problem_dense(num)
    burgers_problem_dense(num)
|
hubert667/AIR | refs/heads/master | build/kombu/kombu/async/__init__.py | 39 | # -*- coding: utf-8 -*-
"""
kombu.async
===========
Event loop implementation.
"""
from __future__ import absolute_import
from .hub import Hub, get_event_loop, set_event_loop
from kombu.utils.eventio import READ, WRITE, ERR
__all__ = ['READ', 'WRITE', 'ERR', 'Hub', 'get_event_loop', 'set_event_loop']
|
mattesno1/Sick-Beard | refs/heads/master | lib/hachoir_parser/container/asn1.py | 90 | """
Abstract Syntax Notation One (ASN.1) parser.
Technical informations:
* PER standard
http://www.tu.int/ITU-T/studygroups/com17/languages/X.691-0207.pdf
* Python library
http://pyasn1.sourceforge.net/
* Specification of Abstract Syntax Notation One (ASN.1)
ISO/IEC 8824:1990 Information Technology
* Specification of Basic Encoding Rules (BER) for ASN.1
ISO/IEC 8825:1990 Information Technology
* OpenSSL asn1parser, use command:
openssl asn1parse -i -inform DER -in file.der
* ITU-U recommendations:
http://www.itu.int/rec/T-REC-X/en
(X.680, X.681, X.682, X.683, X.690, X.691, X.692, X.693, X.694)
* dumpasn1
http://www.cs.auckland.ac.nz/~pgut001/dumpasn1.c
General information:
* Wikipedia (english) article
http://en.wikipedia.org/wiki/Abstract_Syntax_Notation_One
* ASN.1 information site
http://asn1.elibel.tm.fr/en/
* ASN.1 consortium
http://www.asn1.org/
Encodings:
* Basic Encoding Rules (BER)
* Canonical Encoding Rules (CER) -- DER derivative that is not widely used
* Distinguished Encoding Rules (DER) -- used for encrypted applications
* XML Encoding Rules (XER)
* Packed Encoding Rules (PER) -- result in the fewest number of bytes
* Generic String Encoding Rules (GSER)
=> Are encodings compatibles? Which encodings are supported??
Author: Victor Stinner
Creation date: 24 september 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet,
FieldError, ParserError,
Bit, Bits, Bytes, UInt8, GenericInteger, String,
Field, Enum, RawBytes)
from lib.hachoir_core.endian import BIG_ENDIAN
from lib.hachoir_core.tools import createDict, humanDatetime
from lib.hachoir_core.stream import InputStreamError
from lib.hachoir_core.text_handler import textHandler
# --- Field parser ---
class ASNInteger(Field):
    """
    BER length/integer field, two encodings:
    - first byte in 0..127: the byte itself is the value (short form)
    - first byte in 128..255: (byte & 127) gives the number of following
      bytes that hold the big-endian value (long form)
    """
    def __init__(self, parent, name, description=None):
        # Start as a 1-byte field; the long form below grows self._size.
        Field.__init__(self, parent, name, 8, description)
        stream = self._parent.stream
        addr = self.absolute_address
        value = stream.readBits(addr, 8, BIG_ENDIAN)
        if 128 <= value:
            # Long form: low 7 bits give the byte count of the real value.
            nbits = (value & 127) * 8
            if not nbits:
                raise ParserError("ASN.1: invalid ASN integer size (zero)")
            if 64 < nbits:
                # Arbitrary limit to catch errors
                raise ParserError("ASN.1: ASN integer is limited to 64 bits")
            self._size = 8 + nbits
            value = stream.readBits(addr+8, nbits, BIG_ENDIAN)
        # Freeze the decoded value (short- or long-form) as this field's value.
        self.createValue = lambda: value
class OID_Integer(Bits):
    """Base-128 ("varint") sub-identifier of an OBJECT IDENTIFIER.

    Each byte contributes its low 7 bits; a set high bit means more bytes
    follow.  The field grows until a byte with the high bit clear is read.
    """
    def __init__(self, parent, name, description=None):
        Bits.__init__(self, parent, name, 8, description)
        stream = self._parent.stream
        addr = self.absolute_address
        size = 8
        value = 0
        byte = stream.readBits(addr, 8, BIG_ENDIAN)
        value = byte & 127
        while 128 <= byte:
            # Continuation bit set: accumulate 7 more bits from the next byte.
            addr += 8
            size += 8
            if 64 < size:
                # Arbitrary limit to catch errors
                raise ParserError("ASN.1: Object identifier is limited 64 bits")
            byte = stream.readBits(addr, 8, BIG_ENDIAN)
            value = (value << 7) + (byte & 127)
        self._size = size
        self.createValue = lambda: value
def readSequence(self, content_size):
    """Parse a SEQUENCE body: yield nested Objects until the field is full.

    ``content_size`` is unused; the loop relies on the already-computed
    total field size instead.
    """
    while self.current_size < self.size:
        yield Object(self, "item[]")
def readSet(self, content_size):
    """Parse a SET body as a single nested value of ``content_size`` bytes."""
    yield Object(self, "value", size=content_size*8)
def readASCIIString(self, content_size):
    """Parse the body as an ASCII string of ``content_size`` bytes."""
    yield String(self, "value", content_size, charset="ASCII")
def readUTF8String(self, content_size):
    """Parse the body as a UTF-8 string of ``content_size`` bytes."""
    yield String(self, "value", content_size, charset="UTF-8")
def readBMPString(self, content_size):
    """Parse the body as a BMP (UTF-16) string of ``content_size`` bytes."""
    yield String(self, "value", content_size, charset="UTF-16")
def readBitString(self, content_size):
    """Parse a BIT STRING: one byte of unused-bit count, then the raw bits."""
    yield UInt8(self, "padding_size", description="Number of unused bits")
    if content_size > 1:
        yield Bytes(self, "value", content_size-1)
def readOctetString(self, content_size):
    """Parse an OCTET STRING as ``content_size`` raw bytes."""
    yield Bytes(self, "value", content_size)
def formatObjectID(fieldset):
    """Render an OBJECT IDENTIFIER field set as dotted-decimal text.

    The "first" sub-field already displays the two leading arcs; every
    "item[...]" sub-field contributes one further arc.
    """
    parts = [fieldset["first"].display]
    for field in fieldset:
        if field.name.startswith("item["):
            parts.append(str(field.value))
    return ".".join(parts)
def readObjectID(self, content_size):
    """Parse an OBJECT IDENTIFIER: packed first byte, then base-128 arcs."""
    yield textHandler(UInt8(self, "first"), formatFirstObjectID)
    while self.current_size < self.size:
        yield OID_Integer(self, "item[]")
def readBoolean(self, content_size):
    """Parse a BOOLEAN: exactly one byte, rendered as True/False (0 = False)."""
    if content_size != 1:
        raise ParserError("Overlong boolean: got %s bytes, expected 1 byte"%content_size)
    yield textHandler(UInt8(self, "value"), lambda field:str(bool(field.value)))
def readInteger(self, content_size):
    """Parse an INTEGER as a signed big-endian value of ``content_size`` bytes."""
    # Always signed?
    yield GenericInteger(self, "value", True, content_size*8)
# --- Format ---
def formatFirstObjectID(field):
    """Decode the packed first OID byte (40*arc1 + arc2) into "arc1.arc2"."""
    first_arc, second_arc = divmod(field.value, 40)
    return "%u.%u" % (first_arc, second_arc)
def formatValue(fieldset):
    """Use the nested "value" field's display text as the description."""
    value_field = fieldset["value"]
    return value_field.display
def formatUTCTime(fieldset):
    """Render an ASN.1 UTCTime value ("YYMMDDhhmm[ss]Z" or with offset).

    The two-digit year is pivoted at 50: 00-49 -> 2000-2049, 50-99 ->
    1950-1999.  Seconds are only decoded when the value ends in "Z";
    otherwise the trailing timezone offset is ignored entirely.
    """
    import datetime
    value = fieldset["value"].value
    year = int(value[0:2])
    if year < 50:
        year += 2000
    else:
        year += 1900
    month = int(value[2:4])
    day = int(value[4:6])
    hour = int(value[6:8])
    minute = int(value[8:10])
    if value[-1] == "Z":
        # "...hhmmssZ": positions 10:12 hold the seconds.
        second = int(value[10:12])
        dt = datetime.datetime(year, month, day, hour, minute, second)
    else:
        # Skip timezone...
        # NOTE(review): the "+hhmm"/"-hhmm" offset (and any seconds before
        # it) is dropped, so the result stays in the encoded local time.
        dt = datetime.datetime(year, month, day, hour, minute)
    return humanDatetime(dt)
# --- Object parser ---
class Object(FieldSet):
    """One BER-encoded ASN.1 object: class/form/tag bits, length, content.

    TYPE_INFO maps a universal tag number to
    (field name, content parser, description, description formatter).
    """
    TYPE_INFO = {
        0: ("end[]", None, "End (reserved for BER, None)", None), # TODO: Write parser
        1: ("boolean[]", readBoolean, "Boolean", None),
        2: ("integer[]", readInteger, "Integer", None),
        3: ("bit_str[]", readBitString, "Bit string", None),
        4: ("octet_str[]", readOctetString, "Octet string", None),
        5: ("null[]", None, "NULL (empty, None)", None),
        6: ("obj_id[]", readObjectID, "Object identifier", formatObjectID),
        7: ("obj_desc[]", None, "Object descriptor", None), # TODO: Write parser
        8: ("external[]", None, "External, instance of", None), # TODO: Write parser # External?
        9: ("real[]", readASCIIString, "Real number", None), # TODO: Write parser
        10: ("enum[]", readInteger, "Enumerated", None),
        11: ("embedded[]", None, "Embedded PDV", None), # TODO: Write parser
        12: ("utf8_str[]", readUTF8String, "Printable string", None),
        13: ("rel_obj_id[]", None, "Relative object identifier", None), # TODO: Write parser
        14: ("time[]", None, "Time", None), # TODO: Write parser
        # 15: invalid??? sequence of???
        16: ("seq[]", readSequence, "Sequence", None),
        17: ("set[]", readSet, "Set", None),
        18: ("num_str[]", readASCIIString, "Numeric string", None),
        19: ("print_str[]", readASCIIString, "Printable string", formatValue),
        20: ("teletex_str[]", readASCIIString, "Teletex (T61, None) string", None),
        21: ("videotex_str[]", readASCIIString, "Videotex string", None),
        22: ("ia5_str[]", readASCIIString, "IA5 string", formatValue),
        23: ("utc_time[]", readASCIIString, "UTC time", formatUTCTime),
        24: ("general_time[]", readASCIIString, "Generalized time", None),
        25: ("graphic_str[]", readASCIIString, "Graphic string", None),
        26: ("visible_str[]", readASCIIString, "Visible (ISO64, None) string", None),
        27: ("general_str[]", readASCIIString, "General string", None),
        28: ("universal_str[]", readASCIIString, "Universal string", None),
        29: ("unrestricted_str[]", readASCIIString, "Unrestricted string", None),
        30: ("bmp_str[]", readBMPString, "BMP string", None),
        # 31: multiple octet tag number, TODO: not supported
        # Extended tag values:
        # 31: Date
        # 32: Time of day
        # 33: Date-time
        # 34: Duration
    }
    TYPE_DESC = createDict(TYPE_INFO, 2)
    CLASS_DESC = {0: "universal", 1: "application", 2: "context", 3: "private"}
    FORM_DESC = {False: "primitive", True: "constructed"}
    def __init__(self, *args, **kw):
        FieldSet.__init__(self, *args, **kw)
        # Low 5 bits of the first byte are the tag number.
        key = self["type"].value & 31
        if self['class'].value == 0:
            # universal object
            if key in self.TYPE_INFO:
                self._name, self._handler, self._description, create_desc = self.TYPE_INFO[key]
                if create_desc:
                    self.createDescription = lambda: "%s: %s" % (self.TYPE_INFO[key][2], create_desc(self))
                    self._description = None
            elif key == 31:
                raise ParserError("ASN.1 Object: tag bigger than 30 are not supported")
            else:
                self._handler = None
        elif self['form'].value:
            # constructed: treat as sequence
            self._name = 'seq[]'
            self._handler = readSequence
            self._description = 'constructed object type %i' % key
        else:
            # primitive, context/private
            self._name = 'raw[]'
            self._handler = readASCIIString
            self._description = '%s object type %i' % (self['class'].display, key)
        # Total size = header bits up to/including "size" + content bytes.
        field = self["size"]
        self._size = field.address + field.size + field.value*8
    def createFields(self):
        # Identifier octet: 2 class bits, 1 form bit, 5 tag bits.
        yield Enum(Bits(self, "class", 2), self.CLASS_DESC)
        yield Enum(Bit(self, "form"), self.FORM_DESC)
        if self['class'].value == 0:
            yield Enum(Bits(self, "type", 5), self.TYPE_DESC)
        else:
            yield Bits(self, "type", 5)
        yield ASNInteger(self, "size", "Size in bytes")
        size = self["size"].value
        if size:
            if self._handler:
                # Delegate the content bytes to the tag-specific parser.
                for field in self._handler(self, size):
                    yield field
            else:
                yield RawBytes(self, "raw", size)
class ASN1File(Parser):
    """Hachoir parser for a DER/BER file containing a single root object."""
    PARSER_TAGS = {
        "id": "asn1",
        "category": "container",
        "file_ext": ("der",),
        "min_size": 16,
        "description": "Abstract Syntax Notation One (ASN.1)"
    }
    endian = BIG_ENDIAN
    def validate(self):
        """Accept the file only if one root object spans it exactly."""
        try:
            root = self[0]
        except (InputStreamError, FieldError):
            return "Unable to create root object"
        if root.size != self.size:
            return "Invalid root object size"
        return True
    def createFields(self):
        yield Object(self, "root")
|
dandanvidi/catalytic-rates | refs/heads/master | scripts/get_data_cache_pFVA_results.py | 1 | from rcat import RCAT
import pandas as pd
import numpy as np
from cobra.core import Metabolite, Reaction
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
from copy import deepcopy
from cobra.flux_analysis.variability import flux_variability_analysis
def perform_pFVA(model, cs, gr, ur, reactions, fraction_of_optimum=1.0):
model = deepcopy(model)
convert_to_irreversible(model)
rxns = dict([(r.id, r) for r in model.reactions])
rxns['EX_glc_e'].lower_bound = 0 # uptake of carbon source reaction is initialized
try:
rxns['EX_' + cs + '_e'].lower_bound = -ur # redefine sole carbon source uptake reaction in mmol/gr/h
except:
print cs, ur
rxns['EX_glc_e'].lower_bound = -ur
rxns['Ec_biomass_iJO1366_core_53p95M'].upper_bound = gr
rxns['Ec_biomass_iJO1366_core_53p95M'].lower_bound = gr
fake = Metabolite(id='fake')
model.add_metabolites(fake)
for r in model.reactions:
r.add_metabolites({fake:1})
flux_counter = Reaction(name='flux_counter')
flux_counter.add_metabolites(metabolites={fake:-1})
model.add_reaction(flux_counter)
model.change_objective(flux_counter)
print "solving pFVA"
fva = flux_variability_analysis(model,
reaction_list=reactions,
objective_sense='minimize',
fraction_of_optimum=fraction_of_optimum)
return fva
# flux_ranges = pd.Series(reactions)
#
# for r, v in fva.iteritems():
# flux_ranges[r] = v['maximum'] - v['minimum']
#
## flux_ranges.to_csv("cache/reactions_to_pfva_ranges")
#
# return flux_ranges
if __name__ == "__main__":
    # NOTE(review): everything below runs TWICE, writing the identical result
    # to two files (relaxation=0.csv and relaxation=0_1.csv).  This looks
    # like copy-paste duplication; confirm whether the second pass was meant
    # to use a different fraction_of_optimum before deduplicating.
    R = RCAT()
    model_fname = "../data/iJO1366.xml"
    model = create_cobra_model_from_sbml_file(model_fname)
    convert_to_irreversible(model)
    # One row per rmax reaction; filled condition by condition below.
    fluxes = pd.DataFrame(index=R.rmaxn.index, columns=['minimum', 'maximum'])
    for c in R.gc.iterrows():
        reactions = R.rmaxn[R.rmaxn.condition==c[0]].index
        if len(reactions)!=0:
            model = create_cobra_model_from_sbml_file(model_fname)
            convert_to_irreversible(model)
            gr = c[1]['growth rate (h-1)']
            cs = c[1]['carbon source']
            ur = c[1]['uptake rate [mmol/gCDW/h]']
            if np.isnan(ur):
                # default uptake rate when no measurement is available
                ur = 18.5
            model = create_cobra_model_from_sbml_file(model_fname)
            fva = perform_pFVA(model, cs, gr, ur, reactions)
            for k, v in fva.iteritems():
                fluxes['minimum'][k] = v['minimum']
                fluxes['maximum'][k] = v['maximum']
    fluxes.to_csv('../cache/flux_variability[mmol_h_gCDW]_relaxation=0.csv')
    # --- duplicated second pass (see NOTE above) ---
    model = create_cobra_model_from_sbml_file(model_fname)
    convert_to_irreversible(model)
    fluxes = pd.DataFrame(index=R.rmaxn.index, columns=['minimum', 'maximum'])
    for c in R.gc.iterrows():
        reactions = R.rmaxn[R.rmaxn.condition==c[0]].index
        if len(reactions)!=0:
            model = create_cobra_model_from_sbml_file(model_fname)
            convert_to_irreversible(model)
            gr = c[1]['growth rate (h-1)']
            cs = c[1]['carbon source']
            ur = c[1]['uptake rate [mmol/gCDW/h]']
            if np.isnan(ur):
                ur = 18.5
            model = create_cobra_model_from_sbml_file(model_fname)
            fva = perform_pFVA(model, cs, gr, ur, reactions)
            for k, v in fva.iteritems():
                fluxes['minimum'][k] = v['minimum']
                fluxes['maximum'][k] = v['maximum']
    fluxes.to_csv('../cache/flux_variability[mmol_h_gCDW]_relaxation=0_1.csv')
|
daniel-hou0/horizon | refs/heads/master | django-openstack/django_openstack/tests/broken/keypair_tests.py | 21 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for key pair views.
"""
import boto.ec2.keypair
import mox
from django.core.urlresolvers import reverse
from django_openstack.nova.tests.base import (BaseProjectViewTests,
TEST_PROJECT)
TEST_KEY = 'test_key'
class KeyPairViewTests(BaseProjectViewTests):
    """View tests for the nova key-pair pages, using mox-stubbed API calls."""
    def test_index(self):
        """Index page renders the keypair template with an empty list."""
        self.mox.StubOutWithMock(self.project, 'get_key_pairs')
        self.project.get_key_pairs().AndReturn([])
        self.mox.ReplayAll()
        response = self.client.get(reverse('nova_keypairs',
                                           args=[TEST_PROJECT]))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response,
                                'django_openstack/nova/keypairs/index.html')
        self.assertEqual(len(response.context['keypairs']), 0)
        self.mox.VerifyAll()
    def test_add_keypair(self):
        """Creating a keypair returns its material as a binary download."""
        key = boto.ec2.keypair.KeyPair()
        key.name = TEST_KEY
        self.mox.StubOutWithMock(self.project, 'create_key_pair')
        self.project.create_key_pair(key.name).AndReturn(key)
        self.mox.StubOutWithMock(self.project, 'has_key_pair')
        self.project.has_key_pair(key.name).AndReturn(False)
        self.mox.ReplayAll()
        url = reverse('nova_keypairs_add', args=[TEST_PROJECT])
        data = {'js': '0', 'name': key.name}
        res = self.client.post(url, data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res['Content-Type'], 'application/binary')
        self.mox.VerifyAll()
    def test_delete_keypair(self):
        """Deleting a keypair redirects back to the keypair index."""
        self.mox.StubOutWithMock(self.project, 'delete_key_pair')
        self.project.delete_key_pair(TEST_KEY).AndReturn(None)
        self.mox.ReplayAll()
        data = {'key_name': TEST_KEY}
        url = reverse('nova_keypairs_delete', args=[TEST_PROJECT])
        res = self.client.post(url, data)
        self.assertRedirectsNoFollow(res, reverse('nova_keypairs',
                                                  args=[TEST_PROJECT]))
        self.mox.VerifyAll()
    def test_download_keypair(self):
        """Downloading serves the key material stashed in the session."""
        material = 'abcdefgh'
        session = self.client.session
        session['key.%s' % TEST_KEY] = material
        session.save()
        res = self.client.get(reverse('nova_keypairs_download',
                                      args=['test', TEST_KEY]))
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res['Content-Type'], 'application/binary')
        self.assertContains(res, material)
|
gdementen/larray | refs/heads/master | larray/inout/session.py | 2 | from __future__ import absolute_import, division, print_function
from larray.util.compat import basestring
# Registry of file-handler classes, keyed by engine name.
handler_classes = {}
# Maps a file extension to the default engine used to open it.
ext_default_engine = {}
def register_file_handler(engine, extensions=None):
    r"""Class decorator to register new file handler class

    Parameters
    ----------
    engine : str
        Engine name associated with the file handler.
    extensions : str or list of str, optional
        Extension(s) associated with the file handler.
    """
    # Normalize `extensions` to a list once, when the decorator is created.
    if extensions is None:
        ext_list = []
    elif isinstance(extensions, basestring):
        ext_list = [extensions]
    else:
        ext_list = extensions

    def decorate_class(cls):
        # The first handler registered for an engine wins.
        if engine not in handler_classes:
            handler_classes[engine] = cls
        for extension in ext_list:
            ext_default_engine[extension] = engine
        return cls
    return decorate_class
def get_file_handler(engine):
    """Return the file-handler class registered under `engine`.

    Raises TypeError when no handler has been registered for that engine.
    """
    if engine in handler_classes:
        return handler_classes[engine]
    raise TypeError("Engine {} is currently not implemented".format(engine))
|
motion2015/a3 | refs/heads/a3 | openedx/core/djangoapps/user_api/accounts/serializers.py | 8 | from rest_framework import serializers
from django.contrib.auth.models import User
from student.models import UserProfile
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
class AccountUserSerializer(serializers.HyperlinkedModelSerializer):
    """
    Class that serializes the portion of User model needed for account information.
    """
    class Meta:
        model = User
        # Every exposed field is read-only: account identity data cannot be
        # changed through this serializer.
        fields = ("username", "email", "date_joined", "is_active")
        read_only_fields = ("username", "email", "date_joined", "is_active")
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer):
    """
    Class that serializes the portion of UserProfile model needed for account information.
    """
    class Meta:
        model = UserProfile
        fields = (
            "name", "gender", "goals", "year_of_birth", "level_of_education", "language", "country", "mailing_address"
        )
        # Currently no read-only field, but keep this so view code doesn't need to know.
        read_only_fields = ()
    def validate_name(self, attrs, source):
        """ Enforce minimum length for name. """
        if source in attrs:
            new_name = attrs[source].strip()
            if len(new_name) < NAME_MIN_LENGTH:
                raise serializers.ValidationError(
                    "The name field must be at least {} characters long.".format(NAME_MIN_LENGTH)
                )
            attrs[source] = new_name
        return attrs
    def transform_gender(self, obj, value):
        """ Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
        return AccountLegacyProfileSerializer.convert_empty_to_None(value)
    def transform_country(self, obj, value):
        """ Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
        return AccountLegacyProfileSerializer.convert_empty_to_None(value)
    def transform_level_of_education(self, obj, value):
        """ Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
        return AccountLegacyProfileSerializer.convert_empty_to_None(value)
    @staticmethod
    def convert_empty_to_None(value):
        """ Helper method to convert empty string to None (other values pass through). """
        # Only the empty string is mapped; falsy values like 0 pass through.
        return None if value == "" else value
|
Drooids/odoo | refs/heads/8.0 | addons/l10n_co/__openerp__.py | 256 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) David Arnold (devCO).
# Author David Arnold (devCO), dar@devco.co
# Co-Authors Juan Pablo Aries (devCO), jpa@devco.co
# Hector Ivan Valencia Muñoz (TIX SAS)
# Nhomar Hernandez (Vauxoo)
# Humberto Ochoa (Vauxoo)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: a single dict literal read by the server.
{
    'name': 'Colombian - Accounting',
    'version': '0.8',
    'category': 'Localization/Account Charts',
    'description': 'Colombian Accounting and Tax Preconfiguration',
    'author': 'David Arnold BA HSG (devCO)',
    'depends': [
        'account',
        'base_vat',
        'account_chart',
    ],
    # Data files are loaded in order: account types and templates first,
    # then the chart template, taxes, and the setup wizard view.
    'data': [
        'data/account.account.type.csv',
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'data/account_chart_template.xml',
        'data/account.tax.template.csv',
        'wizard/account_wizard.xml',
    ],
    'demo': [],
    'installable': True,
}
|
ryano144/intellij-community | refs/heads/master | python/testData/mover/with.py | 83 | def temp(filepath):
a = 1
with open(filepath) as f:
l = <caret>f.readlines()
for line in l:
a = 1
|
nortd/bomfu | refs/heads/master | main.py | 1 | #!/usr/bin/env python
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Rodrigo Augosto (@coto)'
import os, sys
# Third party libraries path must be fixed before importing webapp2
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'boilerplate/external'))
import webapp2
import routes
from boilerplate import routes as boilerplate_routes
from admin import routes as admin_routes
from boilerplate import config as boilerplate_config
import config
from boilerplate.lib.basehandler import handle_error
# Application config: boilerplate defaults overridden by app-level config.
webapp2_config = boilerplate_config.config
webapp2_config.update(config.config)
# Debug mode is on only under the App Engine dev server ("Development/...").
app = webapp2.WSGIApplication(debug=os.environ['SERVER_SOFTWARE'].startswith('Dev'), config=webapp2_config)
# Install the shared error handler for every status code with a template.
for status_int in app.config['error_templates']:
    app.error_handlers[status_int] = handle_error
# Route registration order: app routes first, then boilerplate, then admin.
routes.add_routes(app)
boilerplate_routes.add_routes(app)
admin_routes.add_routes(app)
|
neumerance/cloudloon2 | refs/heads/master | openstack_dashboard/dashboards/admin/resource_rates/tables.py | 2 | import logging
from django.core.urlresolvers import reverse # noqa
from django.template import defaultfilters as filters
from django.utils.http import urlencode # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tables
from openstack_dashboard.dashboards.admin.models import ResourceRates
from openstack_dashboard.dashboards.admin.resource_rates \
import workflows as flavor_workflows
class CreateResourceRate(tables.LinkAction):
    """Table action opening the "create resource rate" form in a modal."""
    name = "create"
    verbose_name = _("Create Resource Rate")
    url = "horizon:admin:resource_rates:create"
    classes = ("ajax-modal", "btn-create")
class UpdateResourceRate(tables.LinkAction):
    """Row action opening the "update resource rate" form in a modal."""
    name = "update"
    verbose_name = _("Update Resource Rate")
    url = "horizon:admin:resource_rates:update"
    # NOTE(review): "btn-create" looks copy-pasted from the create action;
    # horizon's convention for edit actions is usually "btn-edit" — confirm.
    classes = ("ajax-modal", "btn-create")
class DeleteResourceRate(tables.DeleteAction):
    """Row/table action that deletes the selected ResourceRates records."""
    data_type_singular = _("Resource Rate")
    data_type_plural = _("Resource Rates")

    def delete(self, request, obj_id):
        """Delete the ResourceRates row whose primary key is ``obj_id``."""
        try:
            rates = ResourceRates.objects.filter(id=obj_id)
            rates.delete()
        except Exception:
            # The previous message ("Unable to retrieve resources rates
            # list.") was copy-pasted from the index view; report the
            # operation that actually failed.
            exceptions.handle(request,
                              _('Unable to delete resource rate.'))
class RatingTable(tables.DataTable):
    """Admin table listing each resource type and its billing rate."""
    type = tables.Column('type', verbose_name=_('Resource Type'))
    rate = tables.Column('rate', verbose_name=_('Rate'))
    class Meta:
        name = "rates"
        verbose_name = _("Resource Ratings")
        table_actions = (CreateResourceRate, DeleteResourceRate)
        row_actions = (UpdateResourceRate,
                       DeleteResourceRate)
|
xhook/asterisk-v11 | refs/heads/master | res/pjproject/tests/automated/configure.py | 2 | #!/usr/bin/python
import optparse
import os
import platform
import socket
import subprocess
import sys
# Script revision, derived from the svn $Rev$ keyword expansion.
PROG = "r" + "$Rev$".strip("$ ").replace("Rev: ", "")
# Name of the interpreter running this script, reused to spawn sub-scripts.
PYTHON = os.path.basename(sys.executable)
# Mutable globals filled in by main() from the command line and by prompts.
build_type = ""
vs_target = ""
s60_target = ""
no_test = False
no_pjsua_test = False
#
# Get gcc version
#
def gcc_version(gcc):
    """Return "gcc-<version>" by scanning the output of ``<gcc> -v``.

    Returns "gcc-" with an empty version if no "gcc version" line is found.
    NOTE(review): assumes Python 2 pipe semantics (str lines); on Python 3
    the stdout lines would be bytes and ``s.find("gcc version")`` would fail.
    """
    proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    ver = ""
    while True:
        s = proc.stdout.readline()
        if not s:
            break
        if s.find("gcc version") >= 0:
            # e.g. "gcc version 4.8.2 (...)": the third token is the version.
            ver = s.split(None, 3)[2]
            break
    proc.wait()
    return "gcc-" + ver
#
# Get Visual Studio info
#
class VSVersion:
    """Detect the installed Visual Studio version by running ``cl``.

    The compiler banner's major version maps to a VS release (12 -> VS6/98,
    13 -> 2003, 14 -> 2005, 15 -> 2008, 16 -> 2010, anything else -> 2012).
    Defaults to VS8/2005 when ``cl`` prints no recognizable banner.
    """
    def __init__(self):
        self.version = "8"
        self.release = "2005"
        proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        while True:
            s = proc.stdout.readline()
            if s=="":
                break
            pos = s.find("Version")
            if pos > 0:
                proc.wait()
                # Skip "Version " (8 chars) and take the dotted number.
                s = s[pos+8:]
                ver = s.split(None, 1)[0]
                major = ver[0:2]
                if major=="12":
                    self.version = "6"
                    self.release = "98"
                    break
                elif major=="13":
                    self.version = "7"
                    self.release = "2003"
                    break
                elif major=="14":
                    self.version = "8"
                    self.release = "2005"
                    break
                elif major=="15":
                    self.version = "9"
                    self.release = "2008"
                    break
                elif major=="16":
                    self.version = "10"
                    self.release = "2010"
                    break
                else:
                    # Any newer/unknown major falls back to VS11/2012.
                    self.version = "11"
                    self.release = "2012"
                    break
        proc.wait()
        self.vs_version = "vs" + self.version
        self.vs_release = "vs" + self.release
#
# Get S60 SDK info
#
class S60SDK:
    """Collect Symbian S60 SDK settings from EPOCROOT and the devices tool.

    Exits the process with an error when EPOCROOT is unset or when the
    default device reported by ``devices`` disagrees with EPOCROOT.
    """
    def __init__(self):
        self.epocroot = ""
        self.sdk = ""
        self.device = ""
        # Check that EPOCROOT is set
        if not "EPOCROOT" in os.environ:
            sys.stderr.write("Error: EPOCROOT environment variable is not set\n")
            sys.exit(1)
        epocroot = os.environ["EPOCROOT"]
        # EPOCROOT must have trailing backslash
        if epocroot[-1] != "\\":
            epocroot = epocroot + "\\"
            os.environ["EPOCROOT"] = epocroot
        self.epocroot = epocroot
        self.sdk = sdk1 = epocroot.split("\\")[-2]
        self.device = "@" + self.sdk + ":com.nokia.s60"
        # Check that correct device is set
        proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        sdk2 = ""
        while True:
            line = proc.stdout.readline()
            if not line:
                # EOF guard: previously a missing "- default" line made
                # readline() return "" forever and the script hung here.
                break
            if line.find("- default") > 0:
                sdk2 = line.split(":",1)[0]
                break
        proc.wait()
        if sdk1 != sdk2:
            sys.stderr.write("Error: default SDK in device doesn't match EPOCROOT\n")
            sys.stderr.write("Default device SDK = '" + sdk2 + "'\n")
            sys.stderr.write("EPOCROOT SDK = '" + sdk1 + "'\n")
            sys.exit(1)
        self.name = sdk2.replace("_", "-")
def replace_vars(text):
    """Expand every ``$(VAR)`` placeholder in a scenario template.

    Prompts interactively for vs_target/s60_target when the template needs
    them, derives the platform suffix from ``build_type``, then repeatedly
    substitutes placeholders until none remains.  Returns the expanded text.
    """
    global vs_target, s60_target, build_type, no_test, no_pjsua_test
    suffix = ""
    os_info = platform.system() + platform.release() + "-" + platform.machine()
    # osinfo
    s60sdk_var = None
    if build_type == "s60":
        s60sdk_var = S60SDK()
        os_info = s60sdk_var.name
    elif platform.system().lower() == "windows" or platform.system().lower() == "microsoft":
        if platform.system().lower() == "microsoft":
            os_info = platform.release() + "-" + platform.version() + "-" + platform.win32_ver()[2]
    elif platform.system().lower() == "linux":
        os_info = "-" + "-".join(platform.linux_distribution()[0:2])
    # vs_target
    if not vs_target and text.find("$(VSTARGET)") >= 0:
        if build_type != "vs":
            sys.stderr.write("Warning: $(VSTARGET) only valid for Visual Studio\n")
        print "Enter Visual Studio vs_target name (e.g. Release, Debug) [Release]: ",
        vs_target = sys.stdin.readline().replace("\n", "").replace("\r", "")
        if not vs_target:
            vs_target = "Release"
    # s60_target
    if not s60_target and text.find("$(S60TARGET)") >= 0:
        if build_type != "s60":
            sys.stderr.write("Warning: $(S60TARGET) only valid for S60\n")
        print "Enter S60 target name (e.g. \"gcce urel\") [gcce urel]: ",
        s60_target = sys.stdin.readline().replace("\n", "").replace("\r", "")
        if not s60_target:
            s60_target = "gcce urel"
    # Suffix
    if build_type == "vs":
        suffix = "i386-Win32-vc8-" + vs_target
    elif build_type == "s60":
        suffix = s60sdk_var.name + "-" + s60_target.replace(" ", "-")
    elif build_type == "gnu":
        proc = subprocess.Popen("sh config.guess", cwd="../..",
                                shell=True, stdout=subprocess.PIPE)
        suffix = proc.stdout.readline().rstrip(" \r\n")
    else:
        sys.stderr.write("Error: unsupported build type '" + build_type + "'\n")
        sys.exit(1)
    # Substitution loop: one placeholder kind per pass, until none matches.
    while True:
        if text.find("$(PJSUA-TESTS)") >= 0:
            if no_test==False and no_pjsua_test==False:
                # Determine pjsua exe to use
                exe = "../../pjsip-apps/bin/pjsua-" + suffix
                proc = subprocess.Popen(PYTHON + " runall.py --list-xml -e " + exe,
                                        cwd="../pjsua",
                                        shell=True, stdout=subprocess.PIPE)
                content = proc.stdout.read()
            else:
                content = ""
            text = text.replace("$(PJSUA-TESTS)", content)
        elif text.find("$(GCC)") >= 0:
            text = text.replace("$(GCC)", gcc_version("gcc"))
        elif text.find("$(VS)") >= 0:
            # NOTE(review): VSVersion() is constructed twice here and
            # `vsver` is never used -- the second call spawns `cl` again.
            vsver = VSVersion()
            text = text.replace("$(VS)", VSVersion().vs_release)
        elif text.find("$(VSTARGET)") >= 0:
            text = text.replace("$(VSTARGET)", vs_target)
        elif text.find("$(S60TARGET)") >= 0:
            text = text.replace("$(S60TARGET)", s60_target)
        elif text.find("$(S60TARGETNAME)") >= 0:
            text = text.replace("$(S60TARGETNAME)", s60_target.replace(" ", "-"))
        elif text.find("$(S60DEVICE)") >= 0:
            text = text.replace("$(S60DEVICE)", s60sdk_var.device)
        elif text.find("$(EPOCROOT)") >= 0:
            text = text.replace("$(EPOCROOT)", s60sdk_var.epocroot)
        elif text.find("$(DISABLED)") >= 0:
            text = text.replace("$(DISABLED)", "0")
        elif text.find("$(IPPROOT)") >= 0:
            if not os.environ.has_key("IPPROOT"):
                sys.stderr.write("Error: environment variable IPPROOT is needed but not set\n")
                sys.exit(1)
            text = text.replace("$(IPPROOT)", os.environ["IPPROOT"])
        elif text.find("$(IPPSAMPLES)") >= 0:
            if not os.environ.has_key("IPPSAMPLES"):
                sys.stderr.write("Error: environment variable IPPSAMPLES is needed but not set\n")
                sys.exit(1)
            text = text.replace("$(IPPSAMPLES)", os.environ["IPPSAMPLES"])
        elif text.find("$(IPPARCH)") >= 0:
            if not os.environ.has_key("IPPARCH"):
                text = text.replace("$(IPPARCH)", "")
            else:
                text = text.replace("$(IPPARCH)", os.environ["IPPARCH"])
        elif text.find("$(OS)") >= 0:
            text = text.replace("$(OS)", os_info)
        elif text.find("$(SUFFIX)") >= 0:
            text = text.replace("$(SUFFIX)", suffix)
        elif text.find("$(HOSTNAME)") >= 0:
            text = text.replace("$(HOSTNAME)", socket.gethostname())
        elif text.find("$(PJDIR)") >= 0:
            wdir = os.path.join(os.getcwd(), "../..")
            wdir = os.path.normpath(wdir)
            text = text.replace("$(PJDIR)", wdir)
        elif text.find("$(NOP)") >= 0:
            if platform.system().lower() == "windows" or platform.system().lower() == "microsoft":
                cmd = "CMD /C echo Success"
            else:
                cmd = "echo Success"
            text = text.replace("$(NOP)", cmd)
        elif text.find("$(NOTEST)") >= 0:
            # NOTE(review): `str` shadows the builtin within this branch.
            if no_test:
                str = '"1"'
            else:
                str = '"0"'
            text = text.replace("$(NOTEST)", str)
        else:
            break
    return text
def main(args):
global vs_target, s60_target, build_type, no_test, no_pjsua_test
output = sys.stdout
usage = """Usage: configure.py [OPTIONS] scenario_template_file
Where OPTIONS:
-o FILE Output to file, otherwise to stdout.
-t TYPE Specify build type. If not specified, it will be
asked if necessary. Values are:
vs: Visual Studio
gnu: Makefile based
s60: Symbian S60
-vstarget TARGETNAME Specify Visual Studio target name if build type is set
to vs. If not specified then it will be asked.
Sample target names:
- Debug
- Release
- or any other target in the project file
-s60target TARGETNAME Specify S60 target name if build type is set to s60.
If not specified then it will be asked. Sample target
names:
- "gcce udeb"
- "gcce urel"
-notest Disable all tests in the scenario.
-nopjsuatest Disable pjsua tests in the scenario.
"""
args.pop(0)
while len(args):
if args[0]=='-o':
args.pop(0)
if len(args):
output = open(args[0], "wt")
args.pop(0)
else:
sys.stderr.write("Error: needs value for -o\n")
sys.exit(1)
elif args[0]=='-vstarget':
args.pop(0)
if len(args):
vs_target = args[0]
args.pop(0)
else:
sys.stderr.write("Error: needs value for -vstarget\n")
sys.exit(1)
elif args[0]=='-s60target':
args.pop(0)
if len(args):
s60_target = args[0]
args.pop(0)
else:
sys.stderr.write("Error: needs value for -s60target\n")
sys.exit(1)
elif args[0]=='-t':
args.pop(0)
if len(args):
build_type = args[0].lower()
args.pop(0)
else:
sys.stderr.write("Error: needs value for -t\n")
sys.exit(1)
if not ["vs", "gnu", "s60"].count(build_type):
sys.stderr.write("Error: invalid -t argument value\n")
sys.exit(1)
elif args[0]=='-notest' or args[0]=='-notests':
args.pop(0)
no_test = True
elif args[0]=='-nopjsuatest' or args[0]=='-nopjsuatests':
args.pop(0)
no_pjsua_test = True
else:
break
if len(args) != 1:
sys.stderr.write(usage + "\n")
return 1
if not build_type:
defval = "vs"
if "SHELL" in os.environ:
shell = os.environ["SHELL"]
if shell.find("sh") > -1:
defval = "gnu"
print "Enter the build type (values: vs, gnu, s60) [%s]: " % (defval),
build_type = sys.stdin.readline().replace("\n", "").replace("\r", "")
if not build_type:
build_type = defval
tpl_file = args[len(args)-1]
if not os.path.isfile(tpl_file):
print "Error: unable to find template file '%s'" % (tpl_file)
return 1
f = open(tpl_file, "r")
tpl = f.read()
f.close()
tpl = replace_vars(tpl)
output.write(tpl)
if output != sys.stdout:
output.close()
return 0
if __name__ == "__main__":
rc = main(sys.argv)
sys.exit(rc)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.