| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
#!/usr/bin/python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
import sys
from participantCollection import ParticipantCollection
# Python 2 hack: make UTF-8 the default encoding for implicit str conversions.
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ '548n29', '54f07c', '54kdo2', '54q51x', '54w7mo', '550qge', '557bo3' ]
flaskport = 8870
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
comments += praw.helpers.flatten_tree(submission.comments)
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
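# Design note (added, illustrative): a comment's identity is the SHA-1 of its
# permalink plus its UTF-8 encoded body, so editing a comment changes its hash
# and the comment shows up again for moderation. For example (where permalink
# and body are hypothetical stand-ins for a comment's fields):
#   h = sha1()
#   h.update(permalink.encode('utf-8'))
#   h.update(body.encode('utf-8'))
#   alreadyHandled = h.hexdigest() in retiredCommentHashes()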
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
        commentHash.update(comment.permalink.encode('utf-8'))
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
            if comment.author is None:
                continue  # author was deleted; skip this comment
            authorName = str(comment.author)
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2016-october/serve-signups-with-flask.py | Python | mit | 8,086 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt\dlg_about.ui'
#
# Created: Mon May 20 14:37:48 2013
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_DialogAbout(object):
def setupUi(self, DialogAbout):
DialogAbout.setObjectName(_fromUtf8("DialogAbout"))
DialogAbout.resize(400, 461)
self.buttonBox = QtGui.QDialogButtonBox(DialogAbout)
self.buttonBox.setGeometry(QtCore.QRect(300, 410, 81, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.lb_logo = QtGui.QLabel(DialogAbout)
self.lb_logo.setGeometry(QtCore.QRect(20, 20, 311, 101))
self.lb_logo.setText(_fromUtf8(""))
self.lb_logo.setPixmap(QtGui.QPixmap(_fromUtf8(":/imgs/logo.png")))
self.lb_logo.setObjectName(_fromUtf8("lb_logo"))
self.lb_description = QtGui.QLabel(DialogAbout)
self.lb_description.setGeometry(QtCore.QRect(20, 111, 360, 151))
self.lb_description.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.lb_description.setWordWrap(True)
self.lb_description.setObjectName(_fromUtf8("lb_description"))
self.lb_logo_gem = QtGui.QLabel(DialogAbout)
self.lb_logo_gem.setGeometry(QtCore.QRect(20, 270, 150, 100))
self.lb_logo_gem.setPixmap(QtGui.QPixmap(_fromUtf8(":/imgs/gem_logo_250X180.png")))
self.lb_logo_gem.setScaledContents(True)
self.lb_logo_gem.setObjectName(_fromUtf8("lb_logo_gem"))
self.lb_logo_imagecat = QtGui.QLabel(DialogAbout)
self.lb_logo_imagecat.setGeometry(QtCore.QRect(200, 280, 151, 51))
self.lb_logo_imagecat.setPixmap(QtGui.QPixmap(_fromUtf8(":/imgs/imagecat_logo.gif")))
self.lb_logo_imagecat.setScaledContents(True)
self.lb_logo_imagecat.setObjectName(_fromUtf8("lb_logo_imagecat"))
self.lb_copyright = QtGui.QLabel(DialogAbout)
self.lb_copyright.setGeometry(QtCore.QRect(20, 380, 361, 31))
self.lb_copyright.setObjectName(_fromUtf8("lb_copyright"))
self.retranslateUi(DialogAbout)
QtCore.QMetaObject.connectSlotsByName(DialogAbout)
def retranslateUi(self, DialogAbout):
DialogAbout.setWindowTitle(QtGui.QApplication.translate("DialogAbout", "About SIDD", None, QtGui.QApplication.UnicodeUTF8))
self.lb_description.setText(QtGui.QApplication.translate("DialogAbout", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">Version: $version<br />Last updated: $lastupdate</span></p>\n"
"<p style=\" margin-top:12px; margin-bottom:12px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:8pt;\">SIDD (Spatial Inventory Data Developer) is developed from GEM Inventory and Damage Capture Tools effort. It is part of a collection of tools that can be used for development of exposure datasets and models at the sub-national level, for exposure dataset development per-building and to capture earthquake consequences per-building </span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.lb_copyright.setText(QtGui.QApplication.translate("DialogAbout", "<p>Copyright © ImageCat Inc. 2013.</p>", None, QtGui.QApplication.UnicodeUTF8))
import SIDDResource_rc
| gem/sidd | ui/qt/dlg_about_ui.py | Python | agpl-3.0 | 4,085 |
import time
from django.test import TestCase
from inviteme.forms import ContactMailSecurityForm, ContactMailForm
class ContactMailSecurityFormTestCase(TestCase):
def test_constructor(self):
# timestamp and security_hash calculated during construction
form = ContactMailSecurityForm()
self.assert_(form.initial.get("timestamp", None) != None)
self.assert_(form.initial.get("security_hash", None) != None)
self.assert_(form.initial.get("honeypot", None) == None)
# even though they were provided as initial data
initial = {'timestamp':'1122334455', 'security_hash':'blahblahashed'}
form = ContactMailSecurityForm(initial=initial.copy())
self.assert_(form.initial["timestamp"] != initial["timestamp"])
self.assert_(form.initial["security_hash"] != initial["security_hash"])
def test_clean_timestamp(self):
# check that a timestamp more than two hours old is not accepted
form = ContactMailSecurityForm()
timestamp = int(form.initial["timestamp"]) - (2 * 60 * 61)
security_hash = form.generate_security_hash(timestamp)
data = {"timestamp":str(timestamp), "security_hash":security_hash}
form = ContactMailSecurityForm(data=data)
self.assert_(form.errors.get("timestamp", None) != None)
def test_clean_security_hash(self):
# check that changing the timestamp invalidates the security_hash
form = ContactMailSecurityForm()
data = {"timestamp": str(time.time()),
"security_hash": form.initial["security_hash"]}
form = ContactMailSecurityForm(data=data)
self.assert_(form.errors.get("security_hash", None) != None)
def test_clean_honeypot(self):
# check that validation error raises when honeypot is not empty
form = ContactMailSecurityForm()
data = {"honeypot": "Oh! big mistake!"}
data.update(form.initial)
form = ContactMailSecurityForm(data=data)
self.assert_(form.errors.get("honeypot", None) != None)
EMAIL_ADDR = "alice.liddell@wonderland.com"
class ContactMailFormTestCase(TestCase):
def test_get_instance_data(self):
        # check that get_instance_data returns the cleaned data for a valid form
form = ContactMailForm()
email = 'jane.bloggs@example.com'
data = {'email': email}
data.update(form.initial)
form = ContactMailForm(data=data)
form.is_valid()
data = form.get_instance_data()
self.assert_( len(data) == 2 )
self.assert_( email == data['email'] )
| danirus/django-inviteme | inviteme/tests/forms.py | Python | bsd-2-clause | 2,637 |
# Copyright 2013-15 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright 2015-2016 AvanzOSC
# Copyright 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import _, fields, models
from odoo.exceptions import UserError
class StockMove(models.Model):
_inherit = "stock.move"
invoice_line_ids = fields.Many2many(
comodel_name="account.move.line",
relation="stock_move_invoice_line_rel",
column1="move_id",
column2="invoice_line_id",
string="Invoice Line",
copy=False,
readonly=True,
)
def write(self, vals):
"""
User can update any picking in done state, but if this picking already
invoiced the stock move done quantities can be different to invoice
line quantities. So to avoid this inconsistency you can not update any
stock move line in done state and have invoice lines linked.
"""
if "product_uom_qty" in vals and not self.env.context.get(
"bypass_stock_move_update_restriction"
):
for move in self:
if move.state == "done" and move.invoice_line_ids:
raise UserError(_("You can not modify an invoiced stock move"))
return super().write(vals)
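# Illustrative usage note (not part of the original module): the restriction
# above can be bypassed deliberately through the context key it checks, e.g.
#   move.with_context(
#       bypass_stock_move_update_restriction=True
#   ).write({"product_uom_qty": 2.0})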
| OCA/stock-logistics-workflow | stock_picking_invoice_link/models/stock_move.py | Python | agpl-3.0 | 1,338 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from pylib import constants
from pylib.local.device import local_device_environment
try:
from pylib.remote.device import remote_device_environment
except ImportError:
remote_device_environment = None
def CreateEnvironment(args, error_func):
if args.environment == 'local':
if args.command not in constants.LOCAL_MACHINE_TESTS:
return local_device_environment.LocalDeviceEnvironment(args, error_func)
# TODO(jbudorick) Add local machine environment.
if args.environment == 'remote_device' and remote_device_environment:
return remote_device_environment.RemoteDeviceEnvironment(args,
error_func)
error_func('Unable to create %s environment.' % args.environment)
| junhuac/MQUIC | src/build/android/pylib/base/environment_factory.py | Python | mit | 914 |
from django.core.exceptions import ValidationError
from django.forms import ModelForm
from survey.models.households import Household
from django import forms
class HouseholdForm(ModelForm):
class Meta:
model = Household
exclude = ['investigator', 'location', 'survey', 'household_code']
widgets = {
'ea': forms.HiddenInput(),
}
def __init__(self, is_edit=False, uid=None, survey=None, *args, **kwargs):
super(HouseholdForm, self).__init__(*args, **kwargs)
self.is_editing = is_edit
if not self.is_editing:
self.fields['uid'].initial = Household.next_uid(survey)
else:
self.fields['uid'].initial = self.instance.uid
self.fields['uid'].widget.attrs['disabled'] = 'disabled'
def clean_uid(self):
if not self.is_editing:
try:
uid = self.cleaned_data['uid']
household = Household.objects.filter(uid=int(uid))
if household:
raise ValidationError("Household with this Household Unique Identification already exists.")
except TypeError:
raise ValidationError("This field is required.")
return self.cleaned_data['uid']
| antsmc2/mics | survey/forms/household.py | Python | bsd-3-clause | 1,263 |
import unittest
import configparser
import logging
from queue import Queue
from maxima_threads import MaximaWorker
from maxima_threads import RequestController
from config_loader import Config
class MaximaWorkerTests(unittest.TestCase):
def setUp(self):
config = Config()
# I only need the logger if tests fail...
# logging.basicConfig(level=config['General']['loglevel'])
# self.logger = logging.getLogger("tcp2maxima")
self.queries = Queue()
self.worker = MaximaWorker('testWorker', self.queries, config['Maxima'])
self.worker.start()
def tearDown(self):
self.worker.quit_worker()
def testMaximaReply(self):
controller = RequestController('12+12;')
self.queries.put(controller)
controller.wait()
reply = controller.get_reply()
self.assertTrue(reply == '24')
def testTimeout(self):
# We send a incomplete query without a ; - this should
# give us a timeout.
controller = RequestController('12+12')
self.queries.put(controller)
controller.wait()
reply = controller.get_reply()
self.assertTrue(reply == ';ERR;TIMEOUT')
def testNoOutput(self):
controller = RequestController(';')
self.queries.put(controller)
controller.wait()
reply = controller.get_reply()
self.assertTrue(reply == ';ERR;NO_OUTPUT')
def testBurst(self):
controllers = []
for i in range(100):
controllers.append(RequestController('2^3;'))
for req in controllers:
self.queries.put(req)
for rep in controllers:
rep.wait()
reply = rep.get_reply()
self.assertTrue(reply == '8')
def testBurstTimeout(self):
controllers = []
for i in range(10):
controllers.append(RequestController('12^12^12^12;'))
for req in controllers:
self.queries.put(req)
for rep in controllers:
rep.wait()
reply = rep.get_reply()
self.assertTrue(reply == ';ERR;TIMEOUT')
def main():
unittest.main()
if __name__ == '__main__':
main()
| navigium/tcp2maxima | maxima_threads_tests.py | Python | agpl-3.0 | 2,203 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by models pre-trained on ImageNet.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, data_format=None):
"""Preprocesses a tensor encoding a batch of images.
Arguments:
x: input Numpy tensor, 4D.
data_format: data format of the image tensor.
Returns:
Preprocessed tensor.
"""
if data_format is None:
data_format = K.image_data_format()
assert data_format in {'channels_last', 'channels_first'}
if data_format == 'channels_first':
if x.ndim == 3:
# 'RGB'->'BGR'
x = x[::-1, ...]
# Zero-center by mean pixel
x[0, :, :] -= 103.939
x[1, :, :] -= 116.779
x[2, :, :] -= 123.68
else:
x = x[:, ::-1, ...]
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
# Zero-center by mean pixel
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
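# Note (added for clarity): the constants above are the ImageNet per-channel
# means in BGR order (the Caffe/VGG convention), which is why the channels are
# reversed from RGB before the means are subtracted.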
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
Arguments:
preds: Numpy tensor encoding a batch of predictions.
top: integer, how many top-guesses to return.
Returns:
A list of lists of top class prediction tuples
`(class_name, class_description, score)`.
One list of tuples per sample in batch input.
Raises:
ValueError: in case of invalid shape of the `pred` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file(
'imagenet_class_index.json', CLASS_INDEX_PATH, cache_subdir='models')
CLASS_INDEX = json.load(open(fpath))
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
result.sort(key=lambda x: x[2], reverse=True)
results.append(result)
return results
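# Illustrative usage sketch (assumes a compiled ImageNet classifier `model`
# and a preprocessed batch `x`, both hypothetical here):
#   preds = model.predict(x)
#   for class_name, description, score in decode_predictions(preds, top=3)[0]:
#     print(description, score)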
def _obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
"""Internal utility to compute/validate an ImageNet model's input shape.
Arguments:
input_shape: either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: default input width/height for the model.
min_size: minimum input width/height accepted by the model.
data_format: image data format to use.
require_flatten: whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: one of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
Returns:
An integer shape tuple (may include None entries).
Raises:
ValueError: in case of invalid argument values.
"""
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
logging.warning('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[0]) + ' input channels.')
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
logging.warning('This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with ' +
str(input_shape[-1]) + ' input channels.')
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
        raise ValueError('When setting `include_top=True` '
'and loading `imagenet` weights, '
'`input_shape` should be ' + str(default_shape) + '.')
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' + str(min_size) + 'x'
+ str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' + str(min_size) + 'x'
+ str(min_size) + '; got '
'`input_shape=' + str(input_shape) + '`')
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
'Got `input_shape=' + str(input_shape) + '`')
return input_shape
| pavelchristof/gomoku-ai | tensorflow/contrib/keras/python/keras/applications/imagenet_utils.py | Python | apache-2.0 | 7,528 |
import random
import time
from itertools import permutations as perm
from math import factorial
import numpy as np
from sacorg.utils.utils import binomial
def is_graphical(deg_seq, method="Erdos-Gallai", is_sorted=False):
"""
    Checks whether the given degree sequence is graphical or not
    :param deg_seq: given degree sequence
    :param method: method used to check whether the given degree sequence is graphical
    :param is_sorted: whether deg_seq is already sorted in non-increasing order
    :return: boolean value representing whether the given sequence is graphical or not
"""
# Copy the given degree sequence
d = np.asarray(deg_seq).copy()
# If the length of the sequence is 0, it is graphical
if len(d) == 0:
return True
    # All degrees must be non-negative
if np.any(d < 0):
return False
# The sum of degrees must be even
if sum(d) % 2:
return False
# Sort the sequence in non-increasing order
if is_sorted is False:
d = np.sort(d)[::-1]
# Get the length of the sequence
N = len(d)
"""
Implementation of Erdos-Gallai Theorem
"""
if method == "Erdos-Gallai":
# Check all n conditions
for k in range(1, N + 1):
if sum(d[0:k]) > k * (k - 1) + sum([min(d_i, k) for d_i in d[k:N]]):
return False
# Return true, if all conditions are satisfied
return True
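# Quick illustration (added, not original code): [3, 3, 2, 2, 1, 1] satisfies
# every Erdos-Gallai condition, while [3, 3, 3, 1] violates the k = 2
# condition (3 + 3 > 2*1 + min(3, 2) + min(1, 2) = 5):
#   assert is_graphical([3, 3, 2, 2, 1, 1]) is True
#   assert is_graphical([3, 3, 3, 1]) is False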
def generate_graph(deg_seq, method="Havel-Hakimi"):
"""
Generates a graph satisfying a given degree sequence by using the indicated method
:param deg_seq: Degree sequence
:param method: The method which will be used to generate graph
:return: A graph satisfying the degree sequence deg_seq with vertices starting from 1
"""
# Copy the degree sequence
res_seq = deg_seq.copy()
edges = []
# If the length of the sequence is zero or all elements are 0
if len(res_seq) == 0 or np.all(res_seq == 0):
return edges
if method == "Havel-Hakimi":
# Continue until all elements of the degree sequence become 0
while np.any(res_seq > 0):
# Sort the sequence in descending order
sorted_inx = np.argsort(res_seq)[::-1]
# Choose a vertex having non-zero degree
chosen_inx = np.random.choice(np.where(res_seq > 0)[0], size=1)[0]
i = 0
while res_seq[chosen_inx] > 0:
if sorted_inx[i] != chosen_inx:
# Add edges where each pair of vertices is placed in increasing order
if sorted_inx[i] < chosen_inx:
edges.append([sorted_inx[i]+1, chosen_inx+1])
else:
edges.append([chosen_inx+1, sorted_inx[i]+1])
# Subtract by 1
res_seq[chosen_inx] -= 1
res_seq[sorted_inx[i]] -= 1
i += 1
return edges
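# Illustration (added, not original code): the degree sequence (2, 2, 2) has
# the triangle as its unique realization, so Havel-Hakimi must recover it:
#   edges = generate_graph(np.asarray([2, 2, 2]))
#   assert sorted(edges) == [[1, 2], [1, 3], [2, 3]]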
class MCMC:
"""
Edge-switching based MCMC method to generate uniformly distributed samples
from the set of simple graphs realizing a given degree sequence
"""
def __init__(self):
pass
def compute_perfect_matchings(self, vertices):
"""
Computes all possible perfect matchings of a given vertex set
:param vertices: A set of vertices
:return matchings: All perfect matchings of the vertex set 'vertices'
"""
# All elements must be different from each other
assert len(set(vertices)) == len(vertices), "All elements must be unique"
# The number of elements in vertices must be even
assert len(vertices) % 2 == 0, "The number of elements must be even"
# Get the number of vertices
n = len(vertices)
matchings = []
# Choose 2-cycles in the form of (01)...(45)(67)
inx = np.arange(0, n)
for p in perm(inx):
if np.sum([p[2*i+2] > p[2*i] for i in range(n/2-1)]) == n/2-1 and \
np.sum([p[2*i+1] > p[2*i] for i in range(n / 2)]) == n/2:
# Permute the vertices
permuted = [vertices[i] for i in p]
# Append the permuted sequence
matchings.append([[permuted[i*2], permuted[i*2+1]] for i in range(n/2)])
return matchings
def sample(self, initial_edges, iteration=-1):
"""
        Performs edge switchings on the given edge set 'initial_edges'
        :param initial_edges: The initial edge set
        :param iteration: The number of iterations
        :return: the resulting edge list and the number of performed switchings
"""
# Copy the edge set
edges = list(initial_edges)
# Get the number of edges
num_of_edges = len(edges)
# If the number of iterations is not stated
if iteration < 0:
            # it has been shown that iteration = 100m (m = number of edges) is
            # adequate for many networks; see "On the uniform generation of
            # random graphs with prescribed degree sequences",
            # R. Milo, N. Kashtan, S. Itzkovitz, M. E. J. Newman, U. Alon
iteration = 100*num_of_edges
switching_count = 0
for _ in range(iteration):
# Uniformly choose a number from (0,1) at random
r = np.random.uniform(low=0, high=1, size=1)
# If r is greater than or equal to 1/2
if r >= 0.5:
# Choose two non-adjacent edges
vertices = []
chosen_edges_inx = []
while len(vertices) != 4:
chosen_edges_inx = np.random.choice(range(num_of_edges), size=2, replace=False)
vertices = list(set(edges[chosen_edges_inx[0]] + edges[chosen_edges_inx[1]]))
# Compute all possible matchings
matchings = self.compute_perfect_matchings(vertices)
# Uniformly choose one of them at random
inx = np.random.choice(range(len(matchings)), size=1, replace=False)[0]
# If the proposed edges are not in the edge set, perform switching
chosen_matching = matchings[inx]
check_edge1 = np.sum([chosen_matching[0] == e for e in edges])
check_edge2 = np.sum([chosen_matching[1] == e for e in edges])
if check_edge1 == 0 and check_edge2 == 0:
# Perform switching
edge1 = edges[chosen_edges_inx[0]]
edge2 = edges[chosen_edges_inx[1]]
edges.remove(edge1)
edges.remove(edge2)
edges.append(chosen_matching[0])
edges.append(chosen_matching[1])
switching_count += 1
# Sort the edge sequences
edges.sort()
return edges, switching_count
def get_sample(self, deg_seq, num_of_samples, iteration=-1, verbose=False):
"""
Generates uniformly distributed samples satisfying a given degree sequence
:param deg_seq: Degree sequence
:param num_of_samples: Number of samples which will be generated
:param iteration: Number of iterations to generate each sample
:param verbose:
:return edges: Sequence of edge sequences, vertex labels start from 1
"""
# Get the initial time
time_start = time.clock()
average_switching_count = 0.0
# Generate an initial graph
initial_e = generate_graph(deg_seq=deg_seq, method="Havel-Hakimi")
edges = []
for _ in range(num_of_samples):
# Call the sample function
e, switching_count = self.sample(initial_edges=initial_e, iteration=iteration)
# Append the output sample
edges.append(e)
# Count the total edge switchings
average_switching_count += switching_count
        # Average edge switchings
average_switching_count /= num_of_samples
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
print "Average edge switching count : " + str(average_switching_count)
# Return sequence of edge sequences
return edges
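# Illustrative usage (added, not original code): draw three approximately
# uniform samples realizing the degree sequence (2, 2, 2, 2); each sample is
# returned as a sorted edge list with vertex labels starting from 1.
#   mcmc = MCMC()
#   samples = mcmc.get_sample(np.asarray([2, 2, 2, 2]), num_of_samples=3)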
class BlitzsteinDiaconis:
"""
Implementation of sequential algorithm proposed by Blitzstein and Diaconis
"A Sequential Importance Sampling Algorithm for Generating Random Graphs with Prescribed Degrees"
Joseph Blitzstein and Persi Diaconis
"""
def __init__(self):
pass
def s(self, deg_seq):
"""
Generates a sample graph for a given degree sequence d
:param deg_seq: Given degree sequence
:return E, p, c: edges with vertex labels starting from 1,
probability of the generated graph and
the number of edge combinations
"""
# Copy the given degree sequence to use it as residual sequence
r = deg_seq.copy()
p = 1.0 # probability of the generated graph
c = 1 # the number of edge combinations for the same graph that can be generated by the algorithm
E = [] # list of edges
N = len(r) # length of the sequence
adjacentVertices = [[] for _ in range(N)] # stores the vertices which are adjacent
# run until residual sequence completely becomes 0 vector
while np.any(r != 0):
# Get the index of vertex having minimum degree greater than 0
i = np.where(r == np.amin(r[r > 0]))[0][-1]
c *= factorial(r[i])
while r[i] != 0:
J = np.asarray([], dtype=np.int) # Construct candidate list J
possibleVertices = [o for o in np.arange(N) if (r[o] > 0 and o != i and (o not in adjacentVertices[i]))]
for j in possibleVertices:
# Decrease degrees by one
(r[i], r[j]) = (r[i] - 1, r[j] - 1)
                    # add the vertex j to the candidate list J if the residual sequence is graphical
if is_graphical(r):
J = np.append(J, j)
# Increase degrees by one
(r[i], r[j]) = (r[i] + 1, r[j] + 1)
# Pick a vertex j in the candidate list J with probability proportional to its degree d_j
degrees = np.asarray([r[u] for u in J])
prob = degrees / float(np.sum(degrees))
j = np.random.choice(J, p=prob, size=1)[0]
# Add the found edge to the edge lists
if i < j:
E.append([i+1, j+1]) # indices start from 1
else:
E.append([j+1, i+1]) # indices start from 1
# Add the chosen vertex to the list in order to not choose it again
adjacentVertices[i].append(j)
# Decrease degrees by 1
(r[i], r[j]) = (r[i] - 1, r[j] - 1)
p *= prob[J == j][0]
# Sort the edge sequences
E.sort()
return E, p, c
def get_sample(self, deg_seq, num_of_samples, verbose=False):
"""
        Generates graphs realizing the degree sequence 'deg_seq' with vertex labels {1,...,len(deg_seq)}
:param deg_seq: Degree sequence
:param num_of_samples: Number of samples which will be generated
:return:
"""
# Get the initial time
time_start = time.clock()
# If the sequence is empty or is not graphical
if len(deg_seq) == 0 or is_graphical(deg_seq) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return []
edges = []
for _ in range(num_of_samples):
# Call the s function
e, p, c = self.s(deg_seq)
# Append the edges
edges.append(e)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
# Return the edges
return edges
def count(self, deg_seq, num_of_samples=1000, verbose=False):
"""
Estimates the number of graphs satisfying the degree sequence
        :param deg_seq: Degree sequence
:param num_of_samples: number of samples used in estimation
:return estimation, std: Estimation for the number of graphs satisfying the given degree sequence d
and standard deviation
"""
estimate = 0.0
# Get initial time
time_start = time.clock()
# If the sequence is empty or is not graphical
if len(deg_seq) == 0 or is_graphical(deg_seq) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return 0.0, 0.0
weights = np.zeros(num_of_samples, dtype=float)
for i in range(num_of_samples):
(edges, p, c) = self.s(deg_seq)
weights[i] = 1.0 / float(c * p)
estimate = (1.0 / float(num_of_samples)) * np.sum(weights)
std = np.std(weights, ddof=1)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
return estimate, std
class MyAlg:
"""
Exact counting and uniform sampling algorithm from the set of simple graphs realizing a given degree sequence
"""
def __init__(self):
pass
def vector_of_counts(self, deg_seq, dim=-1):
"""
Compute the vector of counts for deg_seq
:param deg_seq: degree sequence
:param dim: length of the output sequence
:return vector_of_counts: vector of counts for deg_seq
"""
if dim < 0:
dim = max(deg_seq)
vector_of_counts = np.asarray([np.sum(deg_seq == value) for value in range(1, dim+1)])
return vector_of_counts
def conjugate(self, deg_seq, dim=-1):
"""
Computes the conjugate of the given degree sequence deg_seq
:param deg_seq: degree sequence
:param dim: length of the output sequence
:return conj_d: conjugate of deg_seq
"""
if dim < 0:
dim = max(deg_seq)
if len(deg_seq) > 0:
conj_d = np.asarray([np.sum(deg_seq >= num) for num in np.arange(1, dim + 1)])
else:
conj_d = np.zeros((1, dim), dtype=np.int)
return conj_d
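    # Illustration (added, not original code): for
    # deg_seq = np.asarray([3, 2, 2, 1]) the conjugate (the transpose of the
    # Ferrers diagram) is
    #   [sum(d >= 1), sum(d >= 2), sum(d >= 3)] = [4, 3, 1]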
def xsi_tilde(self, deg_seq, s, value):
"""
        Subtract 1 from 's' elements of deg_seq that are equal to 'value', starting from the end of the sequence
        :param deg_seq: degree sequence
        :param s: number of elements to decrement
        :param value: the value that the decremented elements must equal
        :return d: if deg_seq is a non-increasing sequence, then the output sequence d is also non-increasing
"""
d = deg_seq.copy()
count = 0
inx = 0
while count < s:
if d[-1-inx] == value:
d[-1-inx] -= 1
count += 1
inx += 1
return d
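    # Worked example (added, not original code): with deg_seq = [3, 2, 2, 1],
    # xsi_tilde(deg_seq, s=1, value=2) scans from the end and decrements the
    # last element equal to 2, giving [3, 2, 1, 1]; a non-increasing input
    # therefore stays non-increasing.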
def convert2binary(self, deg_seq, matrix_s):
"""
Converts a given counts matrix to a binary matrix
:param deg_seq: degree sequence
:param matrix_s:
:return binary_matrix: a binary matrix with row and column sums equal to deg_seq
"""
n = len(deg_seq)
a = max(deg_seq)
# Copy the column sum q
d = deg_seq.copy()
binary_matrix = np.zeros((n, n), dtype=np.int)
for i in range(n):
# if the table is not completely filled
if np.sum(d) > 0:
# Find the indices having the highest degree greater than zero
row_indices = [inx for inx in range(n) if d[inx] == max(d) and max(d) > 0]
# Uniformly choose a row index at random
row = np.random.choice(row_indices, 1, replace=False)
for val in range(1, a+1):
col_indices = [inx for inx in range(n) if d[inx] == val and row != inx]
if len(col_indices) > 0:
chosen_indices = np.random.choice(col_indices, matrix_s[i][val-1], replace=False)
binary_matrix[row, chosen_indices] = 1
d[chosen_indices] -= 1
d[row] = 0
binary_matrix = binary_matrix + binary_matrix.T
return binary_matrix
def matrix2edges(self, matrix):
"""
Converts a given binary matrix to the corresponding set of edges
:param matrix: Symmetric binary matrix having zero diagonal entries
:return edges: Set of edges where vertex labels start from 1
"""
# Get the row and column size
n = matrix.shape[0]
        # Output edge sequence
edges = []
for i in range(n):
for j in range(i+1,n):
if matrix[i, j] == 1:
edges.append([i+1,j+1])
return edges
def kappa(self, d, a, i, j, row_sum, submatrix_count_values):
"""
Recursive function to compute the number of simple graphs realizing the degree sequence d
:param d: degree sequence
:param a: maximum element of the initial degree sequence d^0
:param i: row index from 0 to n-1
:param j: column index from 0 to a-1
:param row_sum: Sum of chosen elements up to j
:param submatrix_count_values:
:return: Number of simple graphs realizing the degree sequence d
"""
total = 0
if np.sum(d) == 0:
return 1
else:
if d[i] == row_sum:
# Set d[i] to 0
d[i] = 0
# Store submatrix counts to avoid from computing them every time
count_inx = tuple(self.vector_of_counts(d[i+1:], dim=a))
if count_inx in submatrix_count_values:
return submatrix_count_values[count_inx]
else:
total = self.kappa(d, a, i+1, 0, 0, submatrix_count_values)
submatrix_count_values[count_inx] = total
return total
else:
conj_r = np.append(self.conjugate(d[i+1:], a), 0)
conj_z = np.append(self.conjugate(d[i+j+1:], a), 0)
# Construct the vector of counts, r, from conjugate partition of q
r = np.asarray([conj_r[inx - 1] - conj_r[inx] for inx in np.arange(1, len(conj_r))] + [conj_r[-1]])
# Determine lower bound for the entry in ith row and jth column
lower_bound = max(0, d[i] - row_sum - conj_r[j + 1])
# Determine upper bound for the entry in ith row and jth column
gale_ryser_condition = np.sum(conj_z[0:j + 1]) - np.sum(d[i + 1:(i + 1) + j + 1]) + ((j+1)*(j+2))
upper_bound = min(min(r[j], d[i] - row_sum), gale_ryser_condition)
# Choose a value between bounds
for s in range(lower_bound, upper_bound + 1):
updated_d = self.xsi_tilde(deg_seq=d.copy(), s=s, value=j+1)
total += binomial(r[j], s) * self.kappa(updated_d, a, i, j+1, row_sum + s, submatrix_count_values)
return total
def sample(self, d, a, i, j, row_sum, sample_matrix, num_of_matrices, submatrix_count_values):
"""
Recursive function to generate a sample realizing the degree sequence d
:param d: degree sequence
:param a: maximum element of the initial degree sequence d^0
:param i: row index from 0 to n-1
:param j: column index from 0 to a-1
:param row_sum: Sum of chosen elements up to j
:param sample_matrix:
:param num_of_matrices:
:param submatrix_count_values:
:return:
"""
if np.sum(d) == 0:
return
else:
if d[i] == row_sum:
# Set d[i] to 0
d[i] = 0
self.sample(d, a, i+1, 0, 0, sample_matrix, num_of_matrices, submatrix_count_values)
else:
conj_r = np.append(self.conjugate(d[i + 1:], a), 0)
conj_z = np.append(self.conjugate(d[i + j + 1:], a), 0)
# Construct the vector of counts, r, from conjugate partition of q
r = np.asarray([conj_r[inx - 1] - conj_r[inx] for inx in np.arange(1, len(conj_r))] + [conj_r[-1]])
# Determine lower bound for the entry in ith row and jth column
lower_bound = max(0, d[i] - row_sum - conj_r[j + 1])
# Determine upper bound for the entry in ith row and jth column
gale_ryser_condition = np.sum(conj_z[0:j + 1]) - np.sum(d[i + 1:(i + 1) + j + 1]) + ((j + 1) * (j + 2))
upper_bound = min(min(r[j], d[i] - row_sum), gale_ryser_condition)
# Sample uniformly from the set {0,1,...,num_of_matrices-1}
random_number = random.randint(0, (num_of_matrices - 1))
# Choose a value between bounds
total = 0
for s in range(lower_bound, upper_bound + 1):
updated_d = self.xsi_tilde(deg_seq=d, s=s, value=j + 1)
value = self.kappa(updated_d.copy(), a, i, j+1, row_sum + s, submatrix_count_values)
total += binomial(r[j], s) * value
if total > random_number:
sample_matrix[i][j] = s
self.sample(updated_d, a, i, j+1, row_sum + s, sample_matrix, value, submatrix_count_values)
return
raise Warning("Algorithm must not reach this line! Check the error!")
def count(self, deg_seq, verbose=False):
"""
Count the number of simple graphs realizing the degree sequence deg_seq
:param deg_seq: Degree sequence
:param verbose:
:return result: Number of simple graphs with degree sequence deg_seq
"""
# Copy the sequence
d = deg_seq.copy()
# Get initial time
time_start = time.clock()
# If the sequence is empty
if len(d) == 0:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return 1
# if the sequence is not graphical
if is_graphical(d) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return 0
# Sort d in non-increasing order
d = np.sort(d)[::-1]
# Store subMatrix counts to avoid from computing them again
submatrix_count_values = {}
result = self.kappa(d, max(d), i=0, j=0, row_sum=0, submatrix_count_values=submatrix_count_values)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
return result
def get_sample(self, deg_seq, num_of_samples, verbose=False):
"""
Generates uniformly distributed samples satisfying a given degree sequence
:param deg_seq: Degree sequence
:param num_of_samples: Number of samples which will be generated
:param verbose:
:return edges: Sequence of edge sequences, vertex labels start from 1
"""
# Copy the sequence
d = deg_seq.copy()
# Get initial time
time_start = time.clock()
# If the sequence is empty or is not graphical
if len(d) == 0 or is_graphical(d) is False:
if verbose is True:
# Get the total computation time
time_elapsed = (time.clock() - time_start)
print "Total computation time : " + str(time_elapsed)
return []
# Get the size of the sequence d
n = len(d)
# Get the maximum element of d
a = np.max(d)
# Store the sorted indices
inx_order = np.argsort(d)
inx_order = inx_order[::-1]
inx_order = np.argsort(inx_order)
# Sort d in descending order
d = np.sort(d)[::-1]
# Store subMatrix counts to avoid from computing them again
submatrix_count_values = {}
num_of_matrices = self.kappa(d.copy(), a, i=0, j=0, row_sum=0, submatrix_count_values=submatrix_count_values)
edges = []
for k in range(num_of_samples):
sample_matrix_s = np.zeros((n, a), dtype=np.int)
self.sample(d.copy(), max(d), 0, 0, 0, sample_matrix_s, num_of_matrices, submatrix_count_values)
binary_matrix = self.convert2binary(d, sample_matrix_s)
binary_matrix = binary_matrix[inx_order, :][:, inx_order]
e = self.matrix2edges(binary_matrix)
edges.append(e)
# Get the total computation time
time_elapsed = (time.clock() - time_start)
if verbose is True:
print "Total computation time : " + str(time_elapsed)
return edges
| abdcelikkanat/sacorg | unused/simple2.py | Python | apache-2.0 | 25,791 |
# -*- python -*-
# This file is part of pybliographer
#
# Copyright (C) 1998-2004 Frederic GOBRY
# Email : gobry@pybliographer.org
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
import os, sys, string
import locale
charset = locale.getlocale () [1] or 'ascii'
if len (sys.argv) < 4 or len (sys.argv) > 5:
print _("usage: pybconvert <source>..<target> <input> [output]").encode (charset)
sys.exit (1)
format = sys.argv [2]
try:
source, target = string.split (format, '..')
except:
print _("pybconvert: error: bad conversion format").encode (charset)
sys.exit (1)
from Legacy import Open
f_in = sys.argv [3]
if len (sys.argv) == 4:
f_out = sys.stdout
else:
f_out = open (sys.argv [4], 'w')
database = Open.bibopen (f_in, source)
Open.bibwrite (database.iterator (), how = target, out = f_out)
| zkota/pyblio-1.3 | scripts/pybconvert.py | Python | gpl-2.0 | 1,510 |
'''
Created on Feb 20, 2013
@author: maribelacosta
'''
class Revision(object):
'''
classdocs
'''
def __init__(self):
        self.id = 0  # Fake sequential id. Starts at 0.
        self.wikipedia_id = 0  # Wikipedia revision id.
        self.contributor_id = 0  # Id of the contributor who performed the revision.
        self.contributor_name = ''  # Name of the contributor who performed the revision.
        self.contributor_ip = ''  # IP of the contributor who performed the revision.
self.paragraphs = {} # Dictionary of paragraphs. It is of the form {paragraph_hash : [Paragraph]}.
self.ordered_paragraphs = [] # Ordered paragraph hash.
self.length = 0 # Content length (bytes).
self.content = '' #TODO: this should be removed. Just for debugging process.
self.ordered_content = [] #TODO: this should be removed. Just for debugging process.
self.total_tokens = 0 # Number of tokens in the revision.
self.timestamp = 0
def __repr__(self):
return str(id(self))
def to_dict(self):
revision = {}
#json_revision.update({'id' : revisions[revision].wikipedia_id})
#revision.update({'author' : {'id' : self.contributor_id, 'name' : self.contributor_name}})
#json_revision.update({'length' : revisions[revision].length})
#json_revision.update({'paragraphs' : revisions[revision].ordered_paragraphs})
revision.update({'obj' : []})
for paragraph_hash in self.ordered_paragraphs:
p = []
for paragraph in self.paragraphs[paragraph_hash]:
p.append(repr(paragraph))
revision['obj'].append(p)
return revision
| maribelacosta/wikiwho | structures/Revision.py | Python | mit | 1,813 |
import unittest
import common
from common import USER_ROOT
class RemarkTests(common.AegisTestCase):
def test_wipe_cleared_data(self):
self.assertEqual(0, len(self.get("test_harness/remarks/a", auth=USER_ROOT).json()))
def test_root_create_remark(self):
self.assertEqual(200, self.post("test_harness/remarks/a", auth=USER_ROOT).status_code)
self.assertEqual(1, len(self.get("test_harness/remarks/a", auth=USER_ROOT).json()))
def test_root_create_remarks(self):
self.assertEqual(200, self.post("test_harness/remarks/a", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/a", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/a", auth=USER_ROOT).status_code)
self.assertEqual(3, len(self.get("test_harness/remarks/a", auth=USER_ROOT).json()))
def test_root_create_different_remarks(self):
self.assertEqual(200, self.post("test_harness/remarks/a", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/b", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/b", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/c", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/c", auth=USER_ROOT).status_code)
self.assertEqual(200, self.post("test_harness/remarks/c", auth=USER_ROOT).status_code)
self.assertEqual(1, len(self.get("test_harness/remarks/a", auth=USER_ROOT).json()))
self.assertEqual(2, len(self.get("test_harness/remarks/b", auth=USER_ROOT).json()))
self.assertEqual(3, len(self.get("test_harness/remarks/c", auth=USER_ROOT).json()))
if __name__ == "__main__":
unittest.main()
| AegisTools/aegis-appengine | tests/api/remarks.py | Python | gpl-2.0 | 1,838 |
from .base import *
DEBUG = True
| dyzajash/TB_cms | tb_cms/settings/testing.py | Python | bsd-3-clause | 34 |
#!/usr/bin/env python
"""
test_pacman-mirrors
----------------------------------
Tests for `pacman-mirrors` module.
"""
import unittest
from unittest.mock import patch
from pacman_mirrors.functions import httpFn, config_setup, cliFn, defaultFn
from pacman_mirrors.pacman_mirrors import PacmanMirrors
from . import mock_configuration as mock
test_conf = {
"branch": "stable",
"branches": mock.BRANCHES,
"config_file": mock.CONFIG_FILE,
"custom_file": mock.CUSTOM_FILE,
"method": "rank",
"mirror_file": mock.MIRROR_FILE,
"mirror_list": mock.MIRROR_LIST,
"no_update": False,
"country_pool": [],
"protocols": [],
"repo_arch": mock.REPO_ARCH,
"status_file": mock.STATUS_FILE,
"ssl_verify": True,
"test_file": mock.TEST_FILE,
"url_mirrors_json": mock.URL_MIRROR_JSON,
"url_status_json": mock.URL_STATUS_JSON,
"work_dir": mock.WORK_DIR,
"x32": False
}
class TestHttpFn(unittest.TestCase):
"""Pacman Mirrors Test suite"""
def setUp(self):
"""Setup tests"""
pass
@patch("os.getuid")
@patch.object(httpFn, "get_ip_country")
@patch.object(config_setup, "setup_config")
def test_geoip_available(self,
mock_build_config,
mock_get_geoip_country,
mock_os_getuid):
"""TEST: geoip country IS avaiable"""
mock_os_getuid.return_value = 0
mock_build_config.return_value = test_conf
mock_get_geoip_country.return_value = ["Denmark"]
with unittest.mock.patch("sys.argv",
["pacman-mirrors",
"--geoip"]):
app = PacmanMirrors()
app.config = config_setup.setup_config()
cliFn.parse_command_line(app, True)
defaultFn.load_default_mirror_pool(app)
app.selected_countries = httpFn.get_ip_country()
assert app.selected_countries == ["Denmark"]
# @patch("os.getuid")
# @patch.object(httpFn, "get_ip_country")
# @patch.object(configFn, "setup_config")
# def test_geoip_not_available(self,
# mock_build_config,
# mock_get_geoip_country,
# mock_os_getuid):
# """TEST: geoip country IS NOT available"""
# mock_os_getuid.return_value = 0
# mock_get_geoip_country.return_value = "Antarctica"
# mock_build_config.return_value = test_conf
# with unittest.mock.patch("sys.argv",
# ["pacman-mirrors",
# "--geoip"]):
# app = PacmanMirrors()
# app.config = configFn.setup_config()
# cli.parse_command_line(app, True)
# defaultFn.load_default_mirror_pool(app)
# assert app.selected_countries == app.mirrors.country_pool
def tearDown(self):
"""Tear down"""
pass
if __name__ == "__main__":
unittest.main()
| fhdk/pacman-mirrors | tests/test_httpfn.py | Python | gpl-3.0 | 3,056 |
#!/usr/bin/python
from __future__ import print_function
"""
# Boot test routines.
"""
from boot import * # get the test engine
@ok # run+test something at load time
def noop(): return True # never fails
@ok # ditto
def oops(): 1/0 # always fails
@ok # eg3: test the test engine
def unittestok():
ok(oops, # "ok" accepts multiple arguments
noop, # can be named functions
lambda: 1+1, # or anonymous functions
lambda: 1/0
)
ok(oops) # ok can also run with 1 test
ok(oops,noop)
# note that, when run, we never see 'unit test fail'
assert unittest.tries == 10, 'unit test fail'
assert unittest.fails == 5, 'unit test fail'
print("\n"+"EXPECT...... # TRIES= 10 FAIL= 5 %PASS = 67%")
print("GOT.........",unittest.score())
| boddulavineela/mase | src/old/bootok.py | Python | unlicense | 772 |
SUCCESS = 0
ERROR = 1
UNKNOWN_ERROR = 2
VIRTUALENV_NOT_FOUND = 3
NO_MATCHES_FOUND = 23
| ktan2020/legacy-automation | win/Lib/site-packages/pip-1.3.1-py2.7.egg/pip/status_codes.py | Python | mit | 87 |
from django import forms
from micro_admin.models import User, career
class ChangePasswordForm(forms.Form):
oldpassword = forms.CharField()
newpassword = forms.CharField()
retypepassword = forms.CharField()
class UserForm(forms.ModelForm):
class Meta:
model = User
exclude = [
'username',
'date_joined',
'gender',
'website',
'last_login',
'area',
'fb_profile',
'tw_profile',
'ln_profile',
'google_plus_url'
]
class CareerForm(forms.ModelForm):
class Meta:
model = career
exclude = ['featured_image', 'slug', 'created_on']
| ashwin31/MicroSite | micro_admin/forms.py | Python | gpl-2.0 | 753 |
import binascii
import itertools
import os
import random
import subprocess
from weaver.stack import WeaverNests
from weaver.util import Stash
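# Note (added): names such as ShellFunction and CurrentNest are not imported
# here; the assumption is that Weaver executes this file with its workflow DSL
# already loaded into the script's namespace.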
def nstdir(path):
return os.path.join(CurrentNest().work_dir, path)
# Thoughts:
# - For shared files: fifo-0,push-async-1 is equivalent to fifo-0,pull-inf
TASKS = 25
SHARED = [
{
'count': 128,
'prefix': '1R-shared',
'size': lambda: random.randint(1, 64*2**10),
},
{
'count': 128,
'prefix': '1G-shared',
'size': lambda: 1*2**30,
},
{
'count': 64,
'prefix': '2G-shared',
'size': lambda: 2*2**30,
},
{
'count': 32,
'prefix': '4G-shared',
'size': lambda: 4*2**30,
},
{
'count': 16,
'prefix': '8G-shared',
'size': lambda: 8*2**30,
},
]
UNIQUE = [
# {
# 'count': 4,
# 'prefix': '2G',
# 'size': lambda: 2*2**30,
# },
# {
# 'count': 2,
# 'prefix': '4G',
# 'size': lambda: 4*2**30,
# },
]
consumer = ShellFunction('''
for f; do
test -e "$f" || exit 1
done
''', cmd_format = "{EXE} {ARG}")
producer = ShellFunction('''
touch "$1"
shift
while [ "$#" -ge 3 ]; do
openssl enc -aes-256-ctr -nosalt -pass pass:"$1" < /dev/zero 2> /dev/null | head -c "$2" > "$3"
shift
shift
shift
done
''', cmd_format = "{EXE} {ARG}")
gen = []
shared = []
for i in range(TASKS):
shared.append(nstdir('sync.%08d' % i))
for f in SHARED:
for i in range(f['count']):
path = nstdir((f['prefix'] + '.%08d') % i)
gen.append({'path': path, 'size': f['size']()})
shared.append(path)
for task in range(TASKS):
print("compiling task %d" % task)
inputs = []
inputs.extend(shared)
taskdir = nstdir('task.%08d' % task)
os.mkdir(taskdir)
for f in UNIQUE:
for i in range(f['count']):
path = os.path.join(taskdir, (f['prefix'] + '.%08d') % i)
inputs.append(path)
gen.append({'path': path, 'size': f['size']()})
consumer(arguments = inputs, inputs = inputs)
random.shuffle(gen)
def makerandoms(i, files):
sync = nstdir('sync.%08d' % i)
args = [sync]
outputs = [sync]
for f in files:
args.extend((binascii.hexlify(os.urandom(64)), f['size'], f['path']))
outputs.append(f['path'])
producer(arguments = args, outputs = outputs)
for i in range(TASKS):
makerandoms(i, gen[i::TASKS])
# vim: set sts=4 sw=4 ts=8 expandtab ft=python:
| nkremerh/cctools | chirp/tools/workflows/pull-tests.py | Python | gpl-2.0 | 2,512 |
"""
This module describes the unlogged state of the default game.
The setting STATE_UNLOGGED should be set to the python path
of the state instance in this module.
"""
from src.commands.cmdset import CmdSet
from src.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
"""
Sets up the unlogged cmdset.
"""
key = "DefaultUnloggedin"
priority = 0
def at_cmdset_creation(self):
"Populate the cmdset"
self.add(unloggedin.CmdUnconnectedConnect())
self.add(unloggedin.CmdUnconnectedCreate())
self.add(unloggedin.CmdUnconnectedQuit())
self.add(unloggedin.CmdUnconnectedLook())
self.add(unloggedin.CmdUnconnectedHelp())
| TaliesinSkye/evennia | src/commands/default/cmdset_unloggedin.py | Python | bsd-3-clause | 700 |
import numpy as np
from project.dat.property_steel import thermal
# EQUATIONS
# [5.66]
def _lambda_LT(W_ply, f_y, M_cr):
return np.sqrt(W_ply * f_y / M_cr)
# [5.65]
def _lambda_LT_theta_com(lambda_LT, k_y_theta_com, k_E_theta_com):
return lambda_LT * np.sqrt(k_y_theta_com / k_E_theta_com)
# [5.64]
def _alpha(f_y):
return 0.65 * np.sqrt(235/f_y)
# [5.63]
def _phi_LT_theta_com(alpha, lambda_LT_theta_com):
return 0.5 * (1 + alpha * lambda_LT_theta_com + lambda_LT_theta_com ** 2)
# [5.62]
def _chi_LT_fi(phi_LT_theta_com, lambda_LT_theta_com):
return 1 / (phi_LT_theta_com + np.sqrt(phi_LT_theta_com**2 - lambda_LT_theta_com**2))
# [5.61]
def _M_b_fi_t_Rd(chi_LT_fi, W_y, k_y_theta_com, f_y, gamma_M_fi):
return chi_LT_fi * W_y * k_y_theta_com * f_y / gamma_M_fi
# [5.59]
def _M_cr(C_1, C_2, I_z, I_t, I_w, k_z, k_w, z_g, L, E, G):
"""
:param C_1:
:param C_2:
:param I_z:
:param I_t:
:param I_w:
:param k_z:
:param k_w:
:param z_g:
:param L:
:param E:
:param G:
:return:
"""
    # M_cr = C_1 * pi**2 * E * I_z / (k_z * L)**2
    #        * (sqrt((k_z/k_w)**2 * I_w/I_z
    #               + (k_z*L)**2 * G * I_t / (pi**2 * E * I_z)
    #               + (C_2*z_g)**2) - C_2*z_g)
    a = C_1 * np.pi ** 2 * E * I_z / (k_z * L) ** 2
    b = (k_z / k_w) ** 2 * (I_w / I_z)
    c = (k_z * L) ** 2 * G * I_t / (np.pi ** 2 * E * I_z)
    d = (C_2 * z_g) ** 2
    e = C_2 * z_g
    return a * (np.sqrt(b + c + d) - e)
# INPUT PARAMETERS
_k_y_theta = thermal("reduction factor for effective yield strength")
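# Hedged usage sketch: chains the equations above for one illustrative case in
# consistent N-mm units. All section properties and the ~500 C reduction
# factors (approximate EN 1993-1-2 values) below are assumed demo numbers, not
# values taken from project.dat.
if __name__ == '__main__':
    f_y = 355.0                      # yield strength [N/mm^2] (assumed)
    W_ply = 1.307e6                  # plastic section modulus [mm^3] (assumed)
    M_cr = _M_cr(C_1=1.13, C_2=0.45, I_z=2.843e7, I_t=8.9e5, I_w=1.249e12,
                 k_z=1.0, k_w=1.0, z_g=150.0, L=6000.0, E=210000.0, G=81000.0)
    lam = _lambda_LT(W_ply, f_y, M_cr)
    k_y_com, k_E_com = 0.78, 0.60    # reduction factors at ~500 C (approx.)
    lam_com = _lambda_LT_theta_com(lam, k_y_com, k_E_com)
    phi = _phi_LT_theta_com(_alpha(f_y), lam_com)
    chi = _chi_LT_fi(phi, lam_com)
    M_b = _M_b_fi_t_Rd(chi, W_ply, k_y_com, f_y, gamma_M_fi=1.0)
    print('chi_LT_fi = %.3f, M_b_fi_t_Rd = %.3e N*mm' % (chi, M_b))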
|
fuyans/janas
|
project/func/buckling.py
|
Python
|
mit
| 1,362
|
import sys
import traceback
import os
import time
import PyQt4
from PyQt4 import Qt, QtCore, QtGui, uic
from PyQt4.Qt import *
import numpy
from PyEngine3D.Utilities import Singleton, Attribute, Attributes
from PyEngine3D.UI import logger
from PyEngine3D.Common.Command import *
from .Widgets import InputDialogDemo
UI_FILENAME = os.path.join(os.path.split(__file__)[0], "MainWindow.ui")
class SpinBoxDelegate(QtGui.QItemDelegate):
def createEditor(self, parent, option, index):
editor = QtGui.QSpinBox(parent)
editor.setMinimum(0)
editor.setMaximum(100)
return editor
def setEditorData(self, spinBox, index):
value = index.model().data(index, QtCore.Qt.EditRole)
spinBox.setValue(value)
def setModelData(self, spinBox, model, index):
spinBox.interpretText()
value = spinBox.value()
model.setData(index, value, QtCore.Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
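# Hedged usage note (illustrative; not wired up in this module): a delegate
# like SpinBoxDelegate is typically attached to one column of a view, e.g.
#     tree.setItemDelegateForColumn(1, SpinBoxDelegate(tree))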
def addDirtyMark(text):
if not text.startswith('*'):
return '*' + text
return text
def removeDirtyMark(text):
if text.startswith('*'):
return text[1:]
return text
def findTreeItem(parentItem, findItemName):
if type(parentItem) == QtGui.QTreeWidget:
for item in parentItem.findItems("", QtCore.Qt.MatchExactly):
if item.text(0) == findItemName:
return item
elif type(parentItem) == QtGui.QTreeWidgetItem:
for i in range(parentItem.childCount()):
item = parentItem.child(i)
if item.text(0) == findItemName:
return item
return None
class MessageThread(QtCore.QThread):
def __init__(self, cmdQueue):
QtCore.QThread.__init__(self)
self.running = True
self.cmdQueue = cmdQueue
self.limitDelta = 1.0 / 60.0 # 60fps
self.delta = 0.0
self.lastTime = 0.0
def run(self):
self.lastTime = time.time()
while self.running:
# Timer
self.delta = time.time() - self.lastTime
if self.delta < self.limitDelta:
time.sleep(self.limitDelta - self.delta)
# print(1.0/(time.time() - self.lastTime))
self.lastTime = time.time()
            # Process received queues
if not self.cmdQueue.empty():
                # received value must be a tuple
cmd, value = self.cmdQueue.get()
cmdName = get_command_name(cmd)
                # received commands
if cmd == COMMAND.CLOSE_UI:
self.running = False
                # emit the bound signal event
self.emit(QtCore.SIGNAL(cmdName), value)
class MainWindow(QtGui.QMainWindow, Singleton):
def __init__(self, project_filename, cmdQueue, appCmdQueue, cmdPipe):
logger.info("Create MainWindow.")
super(MainWindow, self).__init__()
self.project_filename = project_filename
self.cmdQueue = cmdQueue
self.appCmdQueue = appCmdQueue
self.cmdPipe = cmdPipe
self.selected_item = None
        self.selected_item_category = ''
self.isFillAttributeTree = False
# MessageThread
self.message_thread = MessageThread(self.cmdQueue)
self.message_thread.start()
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.CLOSE_UI)), self.exit)
# load ui file
uic.loadUi(UI_FILENAME, self)
# set windows title
self.set_window_title(project_filename if project_filename else "Default Project")
# exit
actionExit = self.findChild(QtGui.QAction, "actionExit")
QtCore.QObject.connect(actionExit, QtCore.SIGNAL("triggered()"), self.exit)
# project
actionNewProject = self.findChild(QtGui.QAction, "actionNewProject")
QtCore.QObject.connect(actionNewProject, QtCore.SIGNAL("triggered()"), self.new_project)
actionOpenProject = self.findChild(QtGui.QAction, "actionOpenProject")
QtCore.QObject.connect(actionOpenProject, QtCore.SIGNAL("triggered()"), self.open_project)
actionSaveProject = self.findChild(QtGui.QAction, "actionSaveProject")
QtCore.QObject.connect(actionSaveProject, QtCore.SIGNAL("triggered()"), self.save_project)
# scene
actionNewScene = self.findChild(QtGui.QAction, "actionNewScene")
QtCore.QObject.connect(actionNewScene, QtCore.SIGNAL("triggered()"), self.new_scene)
actionSaveScene = self.findChild(QtGui.QAction, "actionSaveScene")
QtCore.QObject.connect(actionSaveScene, QtCore.SIGNAL("triggered()"), self.save_scene)
# action draw mode
actionWireframe = self.findChild(QtGui.QAction, "actionWireframe")
actionShading = self.findChild(QtGui.QAction, "actionShading")
QtCore.QObject.connect(actionWireframe, QtCore.SIGNAL("triggered()"),
lambda: self.set_view_mode(COMMAND.VIEWMODE_WIREFRAME))
QtCore.QObject.connect(actionShading, QtCore.SIGNAL("triggered()"),
lambda: self.set_view_mode(COMMAND.VIEWMODE_SHADING))
# sort ui items
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.SORT_UI_ITEMS)), self.sort_items)
# Resource list
self.resourceListWidget = self.findChild(QtGui.QTreeWidget, "resourceListWidget")
self.resource_menu = QMenu()
self.resource_menu.addAction(self.tr("Load"), self.load_resource)
self.resource_menu.addAction(self.tr("Open"), self.openResource)
self.resource_menu.addAction(self.tr("Duplicate"), self.duplicate_resource)
self.resource_menu.addAction(self.tr("Save"), self.save_resource)
self.resource_menu.addAction(self.tr("Delete"), self.delete_resource)
self.resourceListWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.resourceListWidget.customContextMenuRequested.connect(self.openResourceMenu)
self.resourceListWidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.resourceListWidget.setSortingEnabled(True)
self.resourceListWidget.sortItems(0, 0)
self.resourceListWidget.sortItems(1, 0)
self.resourceListWidget.itemDoubleClicked.connect(self.load_resource)
self.resourceListWidget.itemClicked.connect(self.select_resource)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_RESOURCE_LIST)),
self.add_resource_list)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_RESOURCE_INFO)),
self.set_resource_info)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_RESOURCE_ATTRIBUTE)),
self.fill_resource_attribute)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.DELETE_RESOURCE_INFO)),
self.delete_resource_info)
btn = self.findChild(QtGui.QPushButton, "btnOpenResource")
btn.clicked.connect(self.openResource)
btn = self.findChild(QtGui.QPushButton, "btnSaveResource")
btn.clicked.connect(self.save_resource)
btn = self.findChild(QtGui.QPushButton, "btnDeleteResource")
btn.clicked.connect(self.delete_resource)
btn = self.findChild(QtGui.QPushButton, "btnTest")
btn.clicked.connect(self.test)
btn = self.findChild(QtGui.QPushButton, "btnAddLight")
btn.clicked.connect(self.add_light)
# screen
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_SCREEN_INFO)),
self.set_screen_info)
self.spinWidth = self.findChild(QtGui.QSpinBox, "spinWidth")
self.spinHeight = self.findChild(QtGui.QSpinBox, "spinHeight")
self.checkFullScreen = self.findChild(QtGui.QCheckBox, "checkFullScreen")
btn = self.findChild(QtGui.QPushButton, "btnChangeResolution")
btn.clicked.connect(self.change_resolution)
# render targets
self.comboRenderTargets = self.findChild(QtGui.QComboBox, "comboRenderTargets")
self.comboRenderTargets.activated.connect(self.view_rendertarget)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.CLEAR_RENDERTARGET_LIST)),
self.clear_render_target_list)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_RENDERTARGET_INFO)),
self.add_render_target)
# rendering type
self.comboRenderingType = self.findChild(QtGui.QComboBox, "comboRenderingType")
self.comboRenderingType.currentIndexChanged.connect(self.set_rendering_type)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_RENDERING_TYPE_LIST)),
self.add_rendering_type)
# anti aliasing
self.comboAntiAliasing = self.findChild(QtGui.QComboBox, "comboAntiAliasing")
self.comboAntiAliasing.currentIndexChanged.connect(self.set_anti_aliasing)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_ANTIALIASING_LIST)),
self.add_anti_aliasing)
# game backend
self.comboGameBackend = self.findChild(QtGui.QComboBox, "comboGameBackend")
self.comboGameBackend.currentIndexChanged.connect(self.change_game_backend)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_GAME_BACKEND_LIST)),
self.add_game_backend)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_GAME_BACKEND_INDEX)),
self.set_game_backend_index)
# Object list
self.objectList = self.findChild(QtGui.QTreeWidget, "objectListWidget")
self.object_menu = QMenu()
self.object_menu.addAction(self.tr("Action"), self.action_object)
self.object_menu.addAction(self.tr("Remove"), self.delete_object)
self.objectList.setContextMenuPolicy(Qt.CustomContextMenu)
self.objectList.customContextMenuRequested.connect(self.openObjectMenu)
self.objectList.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.objectList.setSortingEnabled(True)
self.objectList.sortItems(0, 0)
self.objectList.sortItems(1, 0)
self.objectList.itemClicked.connect(self.select_object)
self.objectList.itemActivated.connect(self.select_object)
self.objectList.itemDoubleClicked.connect(self.focus_object)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.DELETE_OBJECT_INFO)),
self.delete_object_info)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_OBJECT_INFO)),
self.add_object_info)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.TRANS_OBJECT_ATTRIBUTE)),
self.fill_object_attribute)
self.connect(self.message_thread, QtCore.SIGNAL(get_command_name(COMMAND.CLEAR_OBJECT_LIST)),
self.clear_object_list)
btn = self.findChild(QtGui.QPushButton, "btnRemoveObject")
btn.clicked.connect(self.delete_object)
# Object attribute tree
self.attributeTree = self.findChild(QtGui.QTreeWidget, "attributeTree")
self.attributeTree.setEditTriggers(self.attributeTree.NoEditTriggers) # hook editable event
self.attributeTree.itemSelectionChanged.connect(self.checkEditable)
self.attributeTree.itemClicked.connect(self.checkEditable)
self.attributeTree.itemChanged.connect(self.attribute_changed)
        # wait for a UI_RUN message, then send a success message
if self.cmdPipe:
self.cmdPipe.RecvAndSend(COMMAND.UI_RUN, None, COMMAND.UI_RUN_OK, None)
def keyPressEvent(self, e):
if e.key() == QtCore.Qt.Key_Escape:
self.exit()
# on closed event
def closeEvent(self, event):
# let the window close
logger.info("Bye")
event.accept()
self.exit()
# ------------------------- #
# Menu
# ------------------------- #
def exit(self, *args):
if args != () and args[0] is not None:
logger.info(*args)
self.appCmdQueue.put(COMMAND.CLOSE_APP)
self.close()
sys.exit()
def sort_items(self):
self.resourceListWidget.sortItems(0, 0)
self.resourceListWidget.sortItems(1, 0)
self.objectList.sortItems(0, 0)
self.objectList.sortItems(1, 0)
def new_project(self):
filename = QtGui.QFileDialog.getSaveFileName(self, 'New Project', os.path.join(".", "Projects"))
self.appCmdQueue.put(COMMAND.NEW_PROJECT, filename)
def open_project(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File', os.path.join(".", "Projects"),
"Project file (*.project)\nAll files (*.*)")
self.appCmdQueue.put(COMMAND.OPEN_PROJECT, filename)
def save_project(self):
self.appCmdQueue.put(COMMAND.SAVE_PROJECT)
def new_scene(self):
self.appCmdQueue.put(COMMAND.NEW_SCENE)
def save_scene(self):
self.appCmdQueue.put(COMMAND.SAVE_SCENE)
def set_view_mode(self, mode):
self.appCmdQueue.put(mode)
def set_screen_info(self, screen_info):
width, height, full_screen = screen_info
self.spinWidth.setValue(width)
self.spinHeight.setValue(height)
self.checkFullScreen.setChecked(full_screen or False)
def clear_render_target_list(self):
self.comboRenderTargets.clear()
# Game Backend
def add_game_backend(self, game_backend_list):
for game_backend_name in game_backend_list:
self.comboGameBackend.addItem(game_backend_name)
def change_game_backend(self, game_backend_index):
self.appCmdQueue.put(COMMAND.CHANGE_GAME_BACKEND, game_backend_index)
def set_game_backend_index(self, game_backend_index):
self.comboGameBackend.setCurrentIndex(game_backend_index)
# Rendering Type
def add_rendering_type(self, rendering_type_list):
for rendering_type_name in rendering_type_list:
self.comboRenderingType.addItem(rendering_type_name)
def set_rendering_type(self, rendering_type_index):
self.appCmdQueue.put(COMMAND.SET_RENDERING_TYPE, rendering_type_index)
# Anti Aliasing
def add_anti_aliasing(self, anti_aliasing_list):
for anti_aliasing_name in anti_aliasing_list:
self.comboAntiAliasing.addItem(anti_aliasing_name)
def set_anti_aliasing(self, anti_aliasing_index):
self.appCmdQueue.put(COMMAND.SET_ANTIALIASING, anti_aliasing_index)
# Render Target
def add_render_target(self, rendertarget_name):
self.comboRenderTargets.addItem(rendertarget_name)
def view_rendertarget(self, rendertarget_index):
rendertarget_name = self.comboRenderTargets.itemText(rendertarget_index)
self.appCmdQueue.put(COMMAND.VIEW_RENDERTARGET, (rendertarget_index, rendertarget_name))
def change_resolution(self):
width = self.spinWidth.value()
height = self.spinHeight.value()
full_screen = self.checkFullScreen.isChecked()
screen_info = (width, height, full_screen)
self.appCmdQueue.put(COMMAND.CHANGE_RESOLUTION, screen_info)
def openResourceMenu(self, position):
self.resource_menu.exec_(self.resourceListWidget.viewport().mapToGlobal(position))
def openObjectMenu(self, position):
self.object_menu.exec_(self.objectList.viewport().mapToGlobal(position))
# ------------------------- #
    # Widget - Property Tree
# ------------------------- #
def checkEditable(self, item=None, column=0):
"""in your connected slot, you can implement any edit-or-not-logic. you want"""
if item is None:
item = self.attributeTree.currentItem()
column = self.attributeTree.currentColumn()
        # e.g. allow editing only in column 1 for items that have no children:
if column == 1 and item.childCount() == 0 and not self.isFillAttributeTree:
if item.dataType == bool:
item.setText(1, "True" if item.checkState(1) == QtCore.Qt.Checked else "False")
self.attributeTree.editItem(item, column)
def attribute_changed(self, item):
if not self.isFillAttributeTree and self.selected_item:
try:
                # skip if the value has not changed
if item.oldValue == item.text(1):
return
item.oldValue = item.text(1)
index = item.index
# check array type, then combine components
parent = item.parent()
if type(parent) == QtGui.QTreeWidgetItem and parent.dataType in (tuple, list, numpy.ndarray):
attribute_name = parent.text(0)
value = []
for i in range(parent.childCount()):
child = parent.child(i)
value.append(child.dataType(child.text(1)))
# numpy array
if parent.dataType == numpy.ndarray:
value = numpy.array(value)
# list or tuple
else:
value = parent.dataType(value)
else:
attribute_name = item.text(0)
if item.dataType == bool:
value = item.dataType(item.text(1) == "True")
else:
value = item.dataType(item.text(1))
selectedItems = []
command = None
                if self.selected_item_category == 'Object':
command = COMMAND.SET_OBJECT_ATTRIBUTE
selectedItems = self.objectList.selectedItems()
                elif self.selected_item_category == 'Resource':
command = COMMAND.SET_RESOURCE_ATTRIBUTE
selectedItems = self.resourceListWidget.selectedItems()
for selectedItem in selectedItems:
selected_item_name = selectedItem.text(0)
selected_item_type = selectedItem.text(1)
# send changed data
self.appCmdQueue.put(command, (selected_item_name, selected_item_type, attribute_name, value, index))
except:
logger.error(traceback.format_exc())
# failed to convert string to dataType, so restore to old value
item.setText(1, item.oldValue)
def add_attribute(self, parent, attribute_name, value, depth=0, index=0):
item = QtGui.QTreeWidgetItem(parent)
item.setFlags(QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
item.setExpanded(True)
# attribute name and type
item.setText(0, attribute_name)
item.dataType = type(value)
        item.remove = False  # flag marking the item for removal on layout refresh
item.depth = depth
item.index = index
# set value
if item.dataType == bool: # bool type
item.setCheckState(1, QtCore.Qt.Checked if value else QtCore.Qt.Unchecked)
item.setText(1, "True" if item.checkState(1) == QtCore.Qt.Checked else "False")
elif item.dataType in (tuple, list, numpy.ndarray): # set list type
item.setText(1, "") # set value to None
for i, itemValue in enumerate(value): # add child component
self.add_attribute(item, "[%d]" % i, itemValue, depth + 1, i)
else: # set general type value - int, float, string
item.setText(1, str(value))
item.oldValue = item.text(1) # set old value
def fill_resource_attribute(self, attributes):
self.selected_item = self.resourceListWidget.currentItem()
        self.selected_item_category = 'Resource'
self.fill_attribute(attributes)
def fill_object_attribute(self, attributes):
self.selected_item = self.objectList.currentItem()
        self.selected_item_category = 'Object'
self.fill_attribute(attributes)
def clear_attribute(self):
self.attributeTree.clear() # clear
def fill_attribute(self, attributes):
# lock edit attribute ui
self.isFillAttributeTree = True
self.clear_attribute()
# fill properties of selected object
attribute_values = list(attributes.get_attributes())
attribute_values.sort(key=lambda x: x.name)
for attribute in attribute_values:
self.add_attribute(self.attributeTree, attribute.name, attribute.value)
# self.showProperties()
# unlock edit attribute ui
self.isFillAttributeTree = False
def showProperties(self):
for item in self.attributeTree.findItems("", QtCore.Qt.MatchExactly | QtCore.Qt.MatchRecursive):
print(item.text(0), item.text(1))
# ------------------------- #
# Widget - Resource List
# ------------------------- #
def get_selected_resource(self):
return self.resourceListWidget.selectedItems()
def add_resource_list(self, resourceList):
for resName, resType in resourceList:
item = QtGui.QTreeWidgetItem(self.resourceListWidget)
item.setText(0, resName)
item.setText(1, resType)
def set_resource_info(self, resource_info):
resource_name, resource_type, is_loaded = resource_info
items = self.resourceListWidget.findItems(resource_name, QtCore.Qt.MatchExactly, column=0)
for item in items:
if item.text(1) == resource_type:
break
else:
item = QtGui.QTreeWidgetItem(self.resourceListWidget)
item.is_loaded = is_loaded
fontColor = 'black' if is_loaded else 'gray'
item.setTextColor(0, QtGui.QColor(fontColor))
item.setTextColor(1, QtGui.QColor(fontColor))
item.setText(0, resource_name)
item.setText(1, resource_type)
def select_resource(self):
items = self.get_selected_resource()
        if items:
if items[0].is_loaded:
self.appCmdQueue.put(COMMAND.REQUEST_RESOURCE_ATTRIBUTE, (items[0].text(0), items[0].text(1)))
else:
self.clear_attribute()
def load_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.LOAD_RESOURCE, (item.text(0), item.text(1)))
def openResource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.ACTION_RESOURCE, (item.text(0), item.text(1)))
def duplicate_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.DUPLICATE_RESOURCE, (item.text(0), item.text(1)))
def save_resource(self, item=None):
items = self.get_selected_resource()
for item in items:
self.appCmdQueue.put(COMMAND.SAVE_RESOURCE, (item.text(0), item.text(1)))
def delete_resource(self, item=None):
items = self.get_selected_resource()
        if items:
contents = "\n".join(["%s : %s" % (item.text(1), item.text(0)) for item in items])
choice = QtGui.QMessageBox.question(self, 'Delete resource.',
"Are you sure you want to delete the\n%s?" % contents,
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
for item in items:
self.appCmdQueue.put(COMMAND.DELETE_RESOURCE, (item.text(0), item.text(1)))
def delete_resource_info(self, resource_info):
resource_name, resource_type_name, is_loaded = resource_info
items = self.resourceListWidget.findItems(resource_name, QtCore.Qt.MatchExactly, column=0)
for item in items:
if item.text(1) == resource_type_name:
index = self.resourceListWidget.indexOfTopLevelItem(item)
self.resourceListWidget.takeTopLevelItem(index)
def test(self):
myPopUp = InputDialogDemo(self, "Create Static Mesh")
myPopUp.exec_()
# ------------------------- #
# Widget - Object List
# ------------------------- #
def add_light(self):
self.appCmdQueue.put(COMMAND.ADD_LIGHT)
def add_object_info(self, object_info):
object_name, object_type = object_info
item = QtGui.QTreeWidgetItem(self.objectList)
item.setText(0, object_name)
item.setText(1, object_type)
def action_object(self, *args):
selectedItems = self.objectList.selectedItems()
for selectedItem in selectedItems:
self.appCmdQueue.put(COMMAND.ACTION_OBJECT, selectedItem.text(0))
def delete_object(self, *args):
selectedItems = self.objectList.selectedItems()
for selectedItem in selectedItems:
self.appCmdQueue.put(COMMAND.DELETE_OBJECT, selectedItem.text(0))
def delete_object_info(self, objName):
items = self.objectList.findItems(objName, QtCore.Qt.MatchExactly, column=0)
for item in items:
index = self.objectList.indexOfTopLevelItem(item)
self.objectList.takeTopLevelItem(index)
def clear_object_list(self, *args):
self.objectList.clear()
def select_object(self):
selectedItems = self.objectList.selectedItems()
if selectedItems:
item = selectedItems[0]
selected_objectName = item.text(0)
selected_objectTypeName = item.text(1)
            # request selected object information to fill the attribute widget
self.appCmdQueue.put(COMMAND.SET_OBJECT_SELECT, selected_objectName)
self.appCmdQueue.put(COMMAND.REQUEST_OBJECT_ATTRIBUTE, (selected_objectName, selected_objectTypeName))
def focus_object(self, item=None):
if item:
selected_objectName = item.text(0)
self.appCmdQueue.put(COMMAND.SET_OBJECT_FOCUS, selected_objectName)
def run_editor(project_filename, cmdQueue, appCmdQueue, cmdPipe):
"""process - QT Widget"""
app = QtGui.QApplication(sys.argv)
main_window = MainWindow.instance(project_filename, cmdQueue, appCmdQueue, cmdPipe)
main_window.show()
sys.exit(app.exec_())
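# Hedged sketch (comments only; the queue types and two-argument put() mirror
# the custom command queues passed into run_editor, which are assumptions
# here): the engine and this editor exchange (COMMAND, value) tuples;
# MessageThread drains cmdQueue and re-emits each command as a Qt signal,
# while UI actions go back through appCmdQueue:
#     cmdQueue.put((COMMAND.CLOSE_UI, None))   # engine -> UI
#     cmd, value = cmdQueue.get()              # consumed in MessageThread.run
#     appCmdQueue.put(COMMAND.CLOSE_APP)       # UI -> engine (MainWindow.exit)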
|
ubuntunux/GuineaPig
|
PyEngine3D/UI/QT/MainWindow.py
|
Python
|
bsd-2-clause
| 26,801
|
import os
from winsys import dialogs, fs
DEFAULT = "temp.csv"
filename, = dialogs.dialog (
"Open a filename",
("Filename", DEFAULT, dialogs.get_filename)
)
if not fs.file (filename):
raise RuntimeError ("%s does not exist" % filename)
else:
os.startfile (filename)
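# Hedged note: the check above relies on the truthiness of fs.file(filename)
# (the entity is falsy when the file does not exist); a library-free
# equivalent would be os.path.isfile(filename).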
|
one2pret/winsys
|
docs/cookbook/dialogs/ask_for_filename.py
|
Python
|
mit
| 286
|
# Author: Jason Lu
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt  # import the plotting library
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese labels render correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
plt.figure(figsize=(7, 5))  # create the figure at the given size
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'  # wedge labels
sizes = [15, 30, 45, 10]  # proportion of each wedge
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']  # color of each wedge
explode = (0, 0.1, 0, 0)  # offset only the second wedge ('Hogs')
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%',
shadow=True, startangle=90)
plt.axis('equal')  # equal aspect ratio so the pie stays a circle, not an ellipse
plt.show()
|
jinzekid/codehub
|
python/数据分析/a3/3_6_饼图.py
|
Python
|
gpl-3.0
| 790
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Test documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 26 10:01:56 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'OnSSET'
copyright = '2019, KTH-dESA'
author = 'KTH Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Testdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Test.tex', 'Test Documentation',
'Kostas', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'test', 'Test Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Test', 'Test Documentation',
author, 'Test', 'One line description of project.',
'Miscellaneous'),
]
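# Hedged usage note: with this conf.py at the manual's root, the HTML build is
# typically invoked from that directory as
#     sphinx-build -b html . _build/html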
|
KTH-dESA/PyOnSSET
|
OnSSET_Manual/conf.py
|
Python
|
mit
| 4,675
|
from stcad.source_dev.chip import Base_Chip
from stcad.source_dev.objects import Drum
chipsize = 50
chip = Base_Chip('drum', chipsize, chipsize, label=False)
drum = Drum(base_layer=1,
            sacrificial_layer=2,
            top_layer=3,
            outer_radius=9,
            head_radius=7,
            electrode_radius=6,
            cable_width=0.5,
            sacrificial_tail_width=3,
            sacrificial_tail_length=3,
            opening_width=4,
            N_holes=3,
            hole_angle=45,
            hole_distance_to_center=4.5,
            hole_distance_to_edge=0.5,
            name='')
chip.add_component(drum, (0, 0))
chip.save_to_gds(show=False, save=True, loc='')
|
srpeiter/ChipDesignCad
|
testing_scripts/mario_drum.py
|
Python
|
gpl-3.0
| 674
|
# -*- coding: utf-8 -*-
import ast
import os
import models
from config import config, sqla
from gevent.pool import Pool
from helpers import random_str, down
base_path = config.get('photo', 'path')
base_path = os.path.join(base_path, 'celebrity')
cookies = {
'bid': ''
}
def create_down(str_urls, douban_id, category):
urls = ast.literal_eval(str_urls or "[]")
path = os.path.join(base_path, category)
for url in urls:
filename = str(douban_id) + '_' + url.split('/')[-1].strip('?')
cookies['bid'] = random_str(11)
down(url, cookies, path, filename)
def create_requests_and_save_datas(douban_id):
session = sqla['session']
cookies['bid'] = random_str(11)
celebrity = session.query(models.Celebrity).filter_by(
douban_id=douban_id
).one()
cover_url = celebrity.cover
thumbnail_cover_url = celebrity.thumbnail_cover
photos_url = celebrity.photos
thumbnail_photos_url = celebrity.thumbnail_photos
down(
cover_url,
cookies,
os.path.join(base_path, 'cover'),
str(douban_id)+'_'+cover_url.split('/')[-1].strip('?')
)
down(
thumbnail_cover_url,
cookies,
os.path.join(base_path, 'thumbnail_cover'),
        str(douban_id)+'_'+thumbnail_cover_url.split('/')[-1].strip('?')
)
create_down(photos_url, douban_id, 'photos')
create_down(thumbnail_photos_url, douban_id, 'thumbnail_photos')
def task(douban_ids, pool_number):
pool = Pool(pool_number)
for douban_id in douban_ids:
pool.spawn(
create_requests_and_save_datas,
douban_id=douban_id
)
pool.join()
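# Hedged usage sketch (the douban ids and pool size are illustrative only, and
# a populated database session is assumed):
#     task(['1054395', '1054396'], pool_number=4)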
|
billvsme/videoSpider
|
webs/douban/tasks/down_celebrity_images.py
|
Python
|
mit
| 1,702
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Killer.py
# Manage running processes
#
# Copyright (C) 2012-2015 Tommy Alex. All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# Create: 2012-10-07 21:47
import DroidUi as Ui
def Killer():
while True:
        packages = Ui.Package.running()
        chosen = Ui.choose('Choose to kill', packages)
        if chosen is None:
            break
        for name in chosen:
print('kill', name)
Ui.Package(name).stop()
if __name__ == '__main__':
Killer()
|
iptux/DroidUi
|
examples/Killer.py
|
Python
|
gpl-3.0
| 1,065
|
# CourseResource
from django.contrib.auth.models import User
from django.test import TestCase
from tastypie.test import ResourceTestCaseMixin
from oppia.tests.utils import get_api_key, get_api_url
class CourseResourceTest(ResourceTestCaseMixin, TestCase):
fixtures = ['user.json', 'oppia.json', 'permissions.json']
def setUp(self):
super(CourseResourceTest, self).setUp()
user = User.objects.get(username='demo')
admin = User.objects.get(username='admin')
self.auth_data = {
'username': 'demo',
'api_key': get_api_key(user=user).key,
}
self.admin_auth = {
'username': 'admin',
'api_key': get_api_key(user=admin).key
}
self.url = get_api_url('course')
# Post invalid
def test_post_invalid(self):
self.assertHttpMethodNotAllowed(self.api_client.post(self.url, format='json', data={}))
# test unauthorized
def test_unauthorized(self):
data = {
'username': 'demo',
'api_key': '1234',
}
self.assertHttpUnauthorized(self.api_client.get(self.url, format='json', data=data))
# test authorized
def test_authorized(self):
resp = self.api_client.get(self.url, format='json', data=self.auth_data)
self.assertHttpOK(resp)
# test contains courses (and right no of courses)
def test_has_courses(self):
resp = self.api_client.get(self.url, format='json', data=self.auth_data)
self.assertHttpOK(resp)
self.assertValidJSON(resp.content)
response_data = self.deserialize(resp)
self.assertTrue('courses' in response_data)
# should have 2 courses with the test data set
        self.assertEqual(len(response_data['courses']), 2)
# check each course had a download url
for course in response_data['courses']:
self.assertTrue('url' in course)
self.assertTrue('shortname' in course)
self.assertTrue('title' in course)
self.assertTrue('version' in course)
def test_course_get_single(self):
resource_url = get_api_url('course', 1)
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpOK(resp)
self.assertValidJSON(resp.content)
# check course format
course = self.deserialize(resp)
self.assertTrue('shortname' in course)
self.assertTrue('title' in course)
self.assertTrue('description' in course)
self.assertTrue('version' in course)
def test_course_get_single_not_found(self):
resource_url = get_api_url('course', 999)
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_get_single_draft_nonvisible(self):
resource_url = get_api_url('course', 3)
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_get_single_draft_admin_visible(self):
resource_url = get_api_url('course', 3)
resp = self.api_client.get(resource_url, format='json', data=self.admin_auth)
self.assertHttpOK(resp)
self.assertValidJSON(resp.content)
def test_course_download_file_zip_not_found(self):
resource_url = get_api_url('course', 2) + 'download/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_download_file_course_not_found(self):
resource_url = get_api_url('course', 999) + 'download/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_download_draft_nonvisible(self):
resource_url = get_api_url('course', 3) + 'download/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_get_activity(self):
resource_url = get_api_url('course', 1) + 'activity/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpOK(resp)
def test_course_get_activity_notfound(self):
resource_url = get_api_url('course', 999) + 'activity/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
def test_course_get_activity_draft_nonvisible(self):
resource_url = get_api_url('course', 3) + 'activity/'
resp = self.api_client.get(resource_url, format='json', data=self.auth_data)
self.assertHttpNotFound(resp)
    def test_course_get_activity_draft_admin_visible(self):
resource_url = get_api_url('course', 3) + 'activity/'
resp = self.api_client.get(resource_url, format='json', data=self.admin_auth)
self.assertHttpOK(resp)
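# Hedged usage note: these API tests are normally run through Django's test
# runner, e.g.
#     python manage.py test oppia.tests.api.test_course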
|
DigitalCampus/django-nurhi-oppia
|
oppia/tests/api/test_course.py
|
Python
|
gpl-3.0
| 4,962
|
# Copyright 2011-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridFS implementation for Motor, an asynchronous driver for MongoDB."""
from __future__ import unicode_literals, absolute_import
import textwrap
import gridfs
import pymongo
import pymongo.errors
from gridfs import grid_file
from motor.core import (AgnosticBaseCursor,
AgnosticCollection,
AgnosticDatabase,
PY35)
from motor.docstrings import *
from motor.metaprogramming import (AsyncCommand,
AsyncRead,
coroutine_annotation,
create_class_with_framework,
DelegateMethod,
motor_coroutine,
MotorCursorChainingMethod,
ReadOnlyProperty)
class AgnosticGridOutCursor(AgnosticBaseCursor):
__motor_class_name__ = 'MotorGridOutCursor'
__delegate_class__ = gridfs.GridOutCursor
add_option = MotorCursorChainingMethod()
address = ReadOnlyProperty()
collation = ReadOnlyProperty()
comment = MotorCursorChainingMethod()
distinct = AsyncRead()
explain = AsyncRead()
hint = MotorCursorChainingMethod()
limit = MotorCursorChainingMethod()
max = MotorCursorChainingMethod()
max_await_time_ms = MotorCursorChainingMethod()
max_scan = MotorCursorChainingMethod()
max_time_ms = MotorCursorChainingMethod()
min = MotorCursorChainingMethod()
remove_option = MotorCursorChainingMethod()
skip = MotorCursorChainingMethod()
sort = MotorCursorChainingMethod(doc=cursor_sort_doc)
where = MotorCursorChainingMethod()
# PyMongo's GridOutCursor inherits __die from Cursor.
_Cursor__die = AsyncCommand()
def clone(self):
"""Get a clone of this cursor."""
return self.__class__(self.delegate.clone(), self.collection)
def next_object(self):
"""Get next GridOut object from cursor."""
grid_out = super(self.__class__, self).next_object()
if grid_out:
grid_out_class = create_class_with_framework(
AgnosticGridOut, self._framework, self.__module__)
return grid_out_class(self.collection, delegate=grid_out)
else:
# Exhausted.
return None
def rewind(self):
"""Rewind this cursor to its unevaluated state."""
self.delegate.rewind()
self.started = False
return self
def _empty(self):
return self.delegate._Cursor__empty
def _query_flags(self):
return self.delegate._Cursor__query_flags
def _data(self):
return self.delegate._Cursor__data
def _clear_cursor_id(self):
self.delegate._Cursor__id = 0
def _close_exhaust_cursor(self):
# Exhaust MotorGridOutCursors are prohibited.
pass
def _killed(self):
return self.delegate._Cursor__killed
@motor_coroutine
def _close(self):
yield self._framework.yieldable(self._Cursor__die())
class MotorGridOutProperty(ReadOnlyProperty):
"""Creates a readonly attribute on the wrapped PyMongo GridOut."""
def create_attribute(self, cls, attr_name):
def fget(obj):
if not obj.delegate._file:
raise pymongo.errors.InvalidOperation(
"You must call MotorGridOut.open() before accessing "
"the %s property" % attr_name)
return getattr(obj.delegate, attr_name)
doc = getattr(cls.__delegate_class__, attr_name).__doc__
return property(fget=fget, doc=doc)
class AgnosticGridOut(object):
"""Class to read data out of GridFS.
MotorGridOut supports the same attributes as PyMongo's
:class:`~gridfs.grid_file.GridOut`, such as ``_id``, ``content_type``,
etc.
You don't need to instantiate this class directly - use the
methods provided by :class:`~motor.MotorGridFSBucket`. If it **is**
instantiated directly, call :meth:`open`, :meth:`read`, or
:meth:`readline` before accessing its attributes.
"""
__motor_class_name__ = 'MotorGridOut'
__delegate_class__ = gridfs.GridOut
_ensure_file = AsyncCommand()
_id = MotorGridOutProperty()
aliases = MotorGridOutProperty()
chunk_size = MotorGridOutProperty()
close = MotorGridOutProperty()
content_type = MotorGridOutProperty()
filename = MotorGridOutProperty()
length = MotorGridOutProperty()
md5 = MotorGridOutProperty()
metadata = MotorGridOutProperty()
name = MotorGridOutProperty()
read = AsyncRead()
readchunk = AsyncRead()
readline = AsyncRead()
seek = DelegateMethod()
tell = DelegateMethod()
upload_date = MotorGridOutProperty()
def __init__(
self,
root_collection,
file_id=None,
file_document=None,
delegate=None,
):
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
if not isinstance(root_collection, collection_class):
raise TypeError(
"First argument to MotorGridOut must be "
"MotorCollection, not %r" % root_collection)
if delegate:
self.delegate = delegate
else:
self.delegate = self.__delegate_class__(
root_collection.delegate,
file_id,
file_document)
self.io_loop = root_collection.get_io_loop()
# python.org/dev/peps/pep-0492/#api-design-and-implementation-revisions
if PY35:
exec(textwrap.dedent("""
def __aiter__(self):
return self
async def __anext__(self):
chunk = await self.readchunk()
if chunk:
return chunk
raise StopAsyncIteration()
"""), globals(), locals())
def __getattr__(self, item):
if not self.delegate._file:
raise pymongo.errors.InvalidOperation(
"You must call MotorGridOut.open() before accessing "
"the %s property" % item)
return getattr(self.delegate, item)
@coroutine_annotation
def open(self):
"""Retrieve this file's attributes from the server.
Returns a Future.
.. versionchanged:: 2.0
No longer accepts a callback argument.
.. versionchanged:: 0.2
:class:`~motor.MotorGridOut` now opens itself on demand, calling
``open`` explicitly is rarely needed.
"""
return self._framework.chain_return_value(self._ensure_file(),
self.get_io_loop(),
self)
def get_io_loop(self):
return self.io_loop
@motor_coroutine
def stream_to_handler(self, request_handler):
"""Write the contents of this file to a
:class:`tornado.web.RequestHandler`. This method calls
:meth:`~tornado.web.RequestHandler.flush` on
the RequestHandler, so ensure all headers have already been set.
For a more complete example see the implementation of
:class:`~motor.web.GridFSHandler`.
.. code-block:: python
class FileHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
@gen.coroutine
def get(self, filename):
db = self.settings['db']
fs = yield motor.MotorGridFSBucket(db())
try:
gridout = yield fs.open_download_stream_by_name(filename)
except gridfs.NoFile:
raise tornado.web.HTTPError(404)
self.set_header("Content-Type", gridout.content_type)
self.set_header("Content-Length", gridout.length)
yield gridout.stream_to_handler(self)
self.finish()
.. seealso:: Tornado `RequestHandler <http://tornadoweb.org/en/stable/web.html#request-handlers>`_
"""
written = 0
while written < self.length:
# Reading chunk_size at a time minimizes buffering.
f = self._framework.yieldable(self.read(self.chunk_size))
yield f
chunk = f.result()
# write() simply appends the output to a list; flush() sends it
# over the network and minimizes buffering in the handler.
request_handler.write(chunk)
request_handler.flush()
written += len(chunk)
class AgnosticGridIn(object):
__motor_class_name__ = 'MotorGridIn'
__delegate_class__ = gridfs.GridIn
__getattr__ = DelegateMethod()
abort = AsyncCommand()
closed = ReadOnlyProperty()
close = AsyncCommand()
write = AsyncCommand().unwrap('MotorGridOut')
writelines = AsyncCommand().unwrap('MotorGridOut')
_id = ReadOnlyProperty()
md5 = ReadOnlyProperty()
filename = ReadOnlyProperty()
name = ReadOnlyProperty()
content_type = ReadOnlyProperty()
length = ReadOnlyProperty()
chunk_size = ReadOnlyProperty()
upload_date = ReadOnlyProperty()
set = AsyncCommand(attr_name='__setattr__', doc="""
Set an arbitrary metadata attribute on the file. Stores value on the server
as a key-value pair within the file document once the file is closed. If
the file is already closed, calling :meth:`set` will immediately update the file
document on the server.
Metadata set on the file appears as attributes on a
:class:`~motor.MotorGridOut` object created from the file.
:Parameters:
- `name`: Name of the attribute, will be stored as a key in the file
document on the server
- `value`: Value of the attribute
""")
def __init__(self, root_collection, delegate=None, **kwargs):
"""
Class to write data to GridFS. Application developers should not
generally need to instantiate this class - see
:meth:`~motor.MotorGridFSBucket.open_upload_stream`.
Any of the file level options specified in the `GridFS Spec
<http://dochub.mongodb.org/core/gridfs>`_ may be passed as
keyword arguments. Any additional keyword arguments will be
set as additional fields on the file document. Valid keyword
arguments include:
- ``"_id"``: unique ID for this file (default:
:class:`~bson.objectid.ObjectId`) - this ``"_id"`` must
not have already been used for another file
- ``"filename"``: human name for the file
- ``"contentType"`` or ``"content_type"``: valid mime-type
for the file
- ``"chunkSize"`` or ``"chunk_size"``: size of each of the
chunks, in bytes (default: 256 kb)
- ``"encoding"``: encoding used for this file. In Python 2,
any :class:`unicode` that is written to the file will be
converted to a :class:`str`. In Python 3, any :class:`str`
that is written to the file will be converted to
:class:`bytes`.
:Parameters:
- `root_collection`: A :class:`~motor.MotorCollection`, the root
collection to write to
- `**kwargs` (optional): file level options (see above)
.. versionchanged:: 0.2
``open`` method removed, no longer needed.
"""
collection_class = create_class_with_framework(
AgnosticCollection, self._framework, self.__module__)
if not isinstance(root_collection, collection_class):
raise TypeError(
"First argument to MotorGridIn must be "
"MotorCollection, not %r" % root_collection)
self.io_loop = root_collection.get_io_loop()
if delegate:
            # Shortcut.
self.delegate = delegate
else:
self.delegate = self.__delegate_class__(
root_collection.delegate,
**kwargs)
if PY35:
# Support "async with bucket.open_upload_stream() as f:"
exec(textwrap.dedent("""
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
"""), globals(), locals())
def get_io_loop(self):
return self.io_loop
class AgnosticGridFSBucket(object):
__motor_class_name__ = 'MotorGridFSBucket'
__delegate_class__ = gridfs.GridFSBucket
delete = AsyncCommand()
download_to_stream = AsyncCommand()
download_to_stream_by_name = AsyncCommand()
open_download_stream = AsyncCommand().wrap(gridfs.GridOut)
open_download_stream_by_name = AsyncCommand().wrap(gridfs.GridOut)
open_upload_stream = DelegateMethod().wrap(gridfs.GridIn)
open_upload_stream_with_id = DelegateMethod().wrap(gridfs.GridIn)
rename = AsyncCommand()
upload_from_stream = AsyncCommand()
upload_from_stream_with_id = AsyncCommand()
def __init__(self, database, collection="fs", disable_md5=False):
"""Create a handle to a GridFS bucket.
Raises :exc:`~pymongo.errors.ConfigurationError` if `write_concern`
is not acknowledged.
This class conforms to the `GridFS API Spec
<https://github.com/mongodb/specifications/blob/master/source/gridfs/gridfs-spec.rst>`_
for MongoDB drivers.
:Parameters:
- `database`: database to use.
          - `collection` (optional): The name of the bucket. Defaults to 'fs'.
- `chunk_size_bytes` (optional): The chunk size in bytes. Defaults
to 255KB.
- `write_concern` (optional): The
:class:`~pymongo.write_concern.WriteConcern` to use. If ``None``
(the default) db.write_concern is used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) db.read_preference is used.
- `disable_md5` (optional): When True, MD5 checksums will not be
computed for uploaded files. Useful in environments where MD5
cannot be used for regulatory or other reasons. Defaults to False.
.. versionadded:: 1.0
.. mongodoc:: gridfs
"""
db_class = create_class_with_framework(
AgnosticDatabase, self._framework, self.__module__)
if not isinstance(database, db_class):
raise TypeError(
"First argument to %s must be MotorDatabase, not %r" % (
self.__class__, database))
self.io_loop = database.get_io_loop()
self.collection = database[collection]
self.delegate = self.__delegate_class__(
database.delegate,
collection,
disable_md5=disable_md5)
def get_io_loop(self):
return self.io_loop
def wrap(self, obj):
if obj.__class__ is grid_file.GridIn:
grid_in_class = create_class_with_framework(
AgnosticGridIn, self._framework, self.__module__)
return grid_in_class(
root_collection=self.collection,
delegate=obj)
elif obj.__class__ is grid_file.GridOut:
grid_out_class = create_class_with_framework(
AgnosticGridOut, self._framework, self.__module__)
return grid_out_class(
root_collection=self.collection,
delegate=obj)
elif obj.__class__ is gridfs.GridOutCursor:
grid_out_class = create_class_with_framework(
AgnosticGridOutCursor, self._framework, self.__module__)
return grid_out_class(
cursor=obj,
collection=self.collection)
def find(self, *args, **kwargs):
"""Find and return the files collection documents that match ``filter``.
Returns a cursor that iterates across files matching
arbitrary queries on the files collection. Can be combined
with other modifiers for additional control.
For example::
cursor = bucket.find({"filename": "lisa.txt"}, no_cursor_timeout=True)
while (yield cursor.fetch_next):
grid_out = cursor.next_object()
data = yield grid_out.read()
This iterates through all versions of "lisa.txt" stored in GridFS.
Note that setting no_cursor_timeout to True may be important to
prevent the cursor from timing out during long multi-file processing
work.
As another example, the call::
most_recent_three = fs.find().sort("uploadDate", -1).limit(3)
would return a cursor to the three most recently uploaded files
in GridFS.
Follows a similar interface to
:meth:`~motor.MotorCollection.find`
in :class:`~motor.MotorCollection`.
:Parameters:
- `filter`: Search query.
- `batch_size` (optional): The number of documents to return per
batch.
- `limit` (optional): The maximum number of documents to return.
- `no_cursor_timeout` (optional): The server normally times out idle
cursors after an inactivity period (10 minutes) to prevent excess
            memory use. Set this option to True to prevent that.
- `skip` (optional): The number of documents to skip before
returning.
- `sort` (optional): The order by which to sort results. Defaults to
None.
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`, created with
:meth:`~MotorClient.start_session`.
If a :class:`~pymongo.client_session.ClientSession` is passed to
:meth:`find`, all returned :class:`MotorGridOut` instances
are associated with that session.
.. versionchanged:: 1.2
Added session parameter.
"""
cursor = self.delegate.find(*args, **kwargs)
grid_out_cursor = create_class_with_framework(
AgnosticGridOutCursor, self._framework, self.__module__)
return grid_out_cursor(cursor, self.collection)
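# Hedged usage sketch (comments only; requires a running MongoDB server): the
# concrete classes generated from these agnostic ones are exposed by the motor
# package, e.g. on asyncio:
#     import asyncio
#     from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorGridFSBucket
#     async def demo():
#         bucket = AsyncIOMotorGridFSBucket(AsyncIOMotorClient().test_database)
#         file_id = await bucket.upload_from_stream('example.txt', b'hello')
#         grid_out = await bucket.open_download_stream(file_id)
#         print(await grid_out.read())
#     asyncio.get_event_loop().run_until_complete(demo())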
|
wujuguang/motor
|
motor/motor_gridfs.py
|
Python
|
apache-2.0
| 19,133
|
"""
===================
Canny edge detector
===================
The Canny filter is a multi-stage edge detector. It uses a filter based on the
derivative of a Gaussian in order to compute the intensity of the gradients. The
Gaussian reduces the effect of noise present in the image. Then, potential
edges are thinned down to 1-pixel curves by removing non-maximum pixels of the
gradient magnitude. Finally, edge pixels are kept or removed using hysteresis
thresholding on the gradient magnitude.
The Canny filter has three adjustable parameters: the width of the Gaussian (the
noisier the image, the greater the width), and the low and high threshold for
the hysteresis thresholding.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage import feature
# Generate noisy image of a square
im = np.zeros((128, 128))
im[32:-32, 32:-32] = 1
im = ndi.rotate(im, 15, mode='constant')
im = ndi.gaussian_filter(im, 4)
im += 0.2 * np.random.random(im.shape)
# Compute the Canny filter for two values of sigma
edges1 = feature.canny(im)
edges2 = feature.canny(im, sigma=3)
# display results
fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3))
ax1.imshow(im, cmap=plt.cm.jet)
ax1.axis('off')
ax1.set_title('noisy image', fontsize=20)
ax2.imshow(edges1, cmap=plt.cm.gray)
ax2.axis('off')
ax2.set_title(r'Canny filter, $\sigma=1$', fontsize=20)
ax3.imshow(edges2, cmap=plt.cm.gray)
ax3.axis('off')
ax3.set_title(r'Canny filter, $\sigma=3$', fontsize=20)
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
|
michaelpacer/scikit-image
|
doc/examples/plot_canny.py
|
Python
|
bsd-3-clause
| 1,633
|
#!/usr/bin/python
# Filename: cty.py
def load_cty(filename):
    """Load country information from a plist file (http://www.country-files.com/cty/history.htm)."""
    try:
        import plistlib
        country_list = plistlib.readPlist(filename)
        return country_list
    except Exception:
        return False
# End of cty.py
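# Hedged usage sketch ('cty.plist' is an illustrative filename):
#     country_list = load_cty('cty.plist')
#     if country_list:
#         print('%d entries loaded' % len(country_list))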
|
dh1tw/DX-Cluster-Parser
|
cty.py
|
Python
|
gpl-3.0
| 293
|
# -*- coding: utf-8 -*-
#
# Sibyl: A modular Python chat bot framework
# Copyright (c) 2015-2017 Joshua Haas <jahschwa.com>
#
# This file is part of Sibyl.
#
# Sibyl is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from sibyl.lib.protocol import User,Room,Message,Protocol
from sibyl.lib.decorators import botconf
################################################################################
# Config options
################################################################################
@botconf
def conf(bot):
return []
################################################################################
# User sub-class
################################################################################
class MYUSER(User):
# called on object init; the following are already created by __init__:
# self.protocol = (Protocol) name of this User's protocol as a str
# self.typ = (int) either Message.PRIVATE or Message.GROUP
# self.real = (User) the "real" User behind this user (defaults to self)
# @param user (object) a full username
def parse(self,user):
raise NotImplementedError
# @return (str) the username in private chat or the nick name in a room
def get_name(self):
raise NotImplementedError
# @return (str) the username without resource identifier
def get_base(self):
raise NotImplementedError
# @param other (object) you must check for class equivalence
# @return (bool) True if self==other (including resource)
def __eq__(self,other):
raise NotImplementedError
# @return (str) the full username
def __str__(self):
raise NotImplementedError
################################################################################
# Room class
################################################################################
class MYROOM(Room):
# called on object init; the following are already created by __init__:
# self.protocol = name of this Room's protocol as a str
# self.nick = the nick name to use in the room (defaults to None)
# self.pword = the password for this room (defaults to None)
# @param name (object) a full roomid
def parse(self,name):
raise NotImplementedError
  # the return value must be the same for equal Rooms and unique for different Rooms
# @return (str) the name of this Room
def get_name(self):
raise NotImplementedError
# @param other (object) you must check for class equivalence
  # @return (bool) True if other is the same room (ignoring nick/pword if present)
def __eq__(self,other):
raise NotImplementedError
################################################################################
# Protocol sub-class
################################################################################
class MYPROTOCOL(Protocol):
# called on bot init; the following are guaranteed to exist:
# self.bot = SibylBot instance
# self.log = the logger you should use
def setup(self):
raise NotImplementedError
# @raise (ConnectFailure) if can't connect to server
# @raise (AuthFailure) if failed to authenticate to server
def connect(self):
raise NotImplementedError
# @return (bool) True if we are connected to the server
def is_connected(self):
raise NotImplementedError
# receive/process messages and call bot._cb_message()
# must ignore msgs from myself and from users not in any of our rooms
# @call bot._cb_message(Message) upon receiving a valid status or message
# @raise (PingTimeout) if implemented
# @raise (ConnectFailure) if disconnected
# @raise (ServerShutdown) if server shutdown
def process(self):
raise NotImplementedError
# called when the bot is exiting for whatever reason
def shutdown(self):
raise NotImplementedError
# send a message to a user
# @param mess (Message) message to be sent
# @raise (ConnectFailure) if failed to send message
# Check: get_emote()
def send(self,mess):
raise NotImplementedError
# send a message with text to every user in a room
# optionally note that the broadcast was requested by a specific User
# @param mess (Message) the message to broadcast
# @return (str,unicode) the text that was actually sent
# Check: get_user(), get_users()
def broadcast(self,mess):
raise NotImplementedError
# join the specified room using the specified nick and password
# @param room (Room) the room to join
# @call bot._cb_join_room_success(room) on successful join
# @call bot._cb_join_room_failure(room,error) on failed join
def join_room(self,room):
raise NotImplementedError
# part the specified room
# @param room (Room) the room to leave
def part_room(self,room):
raise NotImplementedError
# helper function for get_rooms() for protocol-specific flags
# only needs to handle: FLAG_PARTED, FLAG_PENDING, FLAG_IN, FLAG_ALL
# @param flag (int) one of Room.FLAG_* enums
# @return (list of Room) rooms matching the flag
def _get_rooms(self,flag):
raise NotImplementedError
# @param room (Room) the room to query
# @return (list of User) the Users in the specified room
def get_occupants(self,room):
raise NotImplementedError
# @param room (Room) the room to query
# @return (str) the nick name we are using in the specified room
def get_nick(self,room):
raise NotImplementedError
# @param room (Room) the room to query
# @param nick (str) the nick to examine
# @return (User) the "real" User behind the specified nick/room
def get_real(self,room,nick):
raise NotImplementedError
# @return (User) our username
def get_user(self):
raise NotImplementedError
# @param user (str) a user id to parse
# @param typ (int) [Message.PRIVATE] either Message.GROUP or Message.PRIVATE
# @param real (User) [self] the "real" user behind this user
# @return (User) a new instance of this protocol's User subclass
def new_user(self,user,typ=None,real=None):
raise NotImplementedError
# @param name (object) the identifier for this Room
# @param nick (str) [None] the nick name to use in this Room
# @param pword (str) [None] the password for joining this Room
# @return (Room) a new instance of this protocol's Room subclass
def new_room(self,name,nick=None,pword=None):
raise NotImplementedError
################################################################################
# Helper functions
################################################################################
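################################################################################
# Illustration only: a minimal concrete User for a hypothetical
# "name@host/resource" addressing scheme; the class name and scheme are
# assumptions for demonstration, not part of the Sibyl skeleton contract
################################################################################
class ExampleUser(User):
  def parse(self,user):
    self.user = str(user)
  def get_name(self):
    # nick/user part before the "@"
    return self.user.split('@')[0]
  def get_base(self):
    # strip the "/resource" suffix, if any
    return self.user.split('/')[0]
  def __eq__(self,other):
    return isinstance(other,ExampleUser) and self.user == other.user
  def __str__(self):
    return self.user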
|
jfrederickson/sibyl
|
protocols/skeleton.py
|
Python
|
gpl-3.0
| 7,096
|
# -*- coding: utf-8 -*-
from __future__ import division
def swbd_folds_disfluency_corpus(corpus_input, num_files=496, num_folds=10):
"""Returns num_folds fold division of the input swbd PTB
disfluency corpus in num_folds strings
of the configuration of the division and the folds themselves.
Keyword Arguments:
corpus_input -- the (ID,.. up to n features) tuple which will be divded
into num_fold tuples
of the same type
"""
# we have 10 divisions of 496 (?) files, the smallheldout corpus already
# there so 9 to start with in rest
config = []
index = 0
# split main clean corpus into 9, have heldout as the other one, and rotate
folds = []
# calculate the dividing points based on nearest int
divs = []
# goes up to n-1th fold to get the split point
for i in range(1, num_folds):
split_point = int((i / num_folds) * num_files)
divs.append(split_point)
divs.append(num_files - 1) # add the last one
    line = corpus_input.readline()  # first line
for d in divs:
subcorpus = ""
posSubcorpus = ""
targetstop = d
currentSection = line.split(",")[0].split(":")[0]
current = currentSection
ranges = []
print currentSection
while index <= targetstop:
ranges.append(current)
while current == currentSection:
subcorpus += line.split(",")[1] + "\n"
                posSubcorpus += corpus_input.readline().split(",")[1] + "\n"
                line = corpus_input.readline()  # read the next text level
if not line:
break # end of file, break to increment index?
current = line.split(",")[0].split(":")[0]
currentSection = current
index += 1
folds.append((tuple(ranges), subcorpus, posSubcorpus))
# fold always has structure (ranges,wordsubcorpus(big
# string),posSubcorpus(big string))
print "no of folds = ", str(len(folds))
for i in range(0, len(folds)):
test = i
if i == len(folds) - 1:
heldout = i - 1
else:
heldout = i + 1
training = []
for index in range(0, len(folds)):
if not index == heldout and not index == test:
training.append(index) # just appends an index
# config is always (list of training indices),heldoutout index,
# test(i.e. where we're assigning probs to)
config.append((tuple(training), heldout, test))
print "config size", str(len(config))
    corpus_input.close()
return config, folds
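# Worked example of the split-point computation above (illustrative numbers,
# not the real corpus): with num_files=10 and num_folds=5,
# divs == [2, 4, 6, 8] + [9], i.e. fold boundaries after files 2, 4, 6 and 8,
# with the final fold ending at the last file index.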
def bnc_folds():
source = open("../data/bnc_spoken/bnc_spokenREF.text")
fold = open("../data/bnc_spoken/bnc_spokenAllREF.text", "w")
count = 0
uttcount = 0
for line in source:
ref, utt = line.split(",")
if ref == "":
pos = False
else:
pos = True
# Could ignore numbers as they are different in the BNC
# if pos == False:
#number = False
# for i in range(0,10):
# if str(i) in utt or "-" in utt:
# number = True
# for line in bnc:
# pass
# break
# break
# if number == True: continue
        # only keep non-empty lines; word lines and POS lines arrive in pairs
        if len(line.split()) != 0:
            if pos == True:
                possline = line
            else:
                normalline = line
        else:
            continue
if pos == True:
if not len(possline.split()) == len(normalline.split()):
print possline, normalline
continue
# Could ignore filled pauses, as these are different,
#though could do normalization
# if "UH" in possline:
# #might be safer to ignore these
# continue
# #print normalline, possline
fold.write(normalline)
fold.write(possline)
count += len(utt.split())
uttcount += 1
# else:
# if count > 3000000: break
print count
print uttcount
fold.close()
source.close()
|
dsg-bielefeld/deep_disfluency
|
deep_disfluency/corpus/create_folds.py
|
Python
|
mit
| 4,213
|
#!/usr/bin/env python
"""\
Helios.py: Solve two-electron atoms using Pekeris techniques
Copyright 1999, 2000 Richard P. Muller, David R. Kent IV, and
William A. Goddard, III
Special thanks to Edward Montgomery, Michael Barnett, and
Robert Forrey, without whom this work would have been impossible.
Test values (from Pekeris) for verification.
Singlet states:
n Matrix Size H- He
5 22 0.52763068142 2.90368898612
9 95 0.52775001651 2.90372338908
10 125 0.52775061025 2.90372387862
11 161 0.52775085979 2.90372411115
12 203 0.52775093560 2.90372422832
13 252 0.52775097384 2.90372429041
16 444 0.52775100630 2.90372435622
19 715 0.52775101339 2.90372437081
22 1078 0.52775101536 2.90372437476
Triplet states:
n Matrix Size He
11 125 2.17522097961
14 252 2.17522925889
17 444 2.17522937679
>>> two_electron_solve(1,5,0)
-0.52763068141531577
>>> two_electron_solve(2,5,0)
-2.9036889861207293
>>> two_electron_solve(1,9,0)
-0.52775001651538511
>>> two_electron_solve(2,9,0)
-2.9037233890716729
>>> two_electron_solve(2,11,1)
-2.1752209796141289
"""
import sys
import numpy as np
from pyquante2.utils import geigh
def kval(l,m,n,spin):
    # This is from Pekeris, except that the last term of the singlet
    # expression corrects a typo in Pekeris' paper
w = l+m+n
lm = l+m
if spin == 0:
k = w*(w+2)*(2*w+5)/24. + (1-pow(-1,w))/16. + lm*lm/4. +\
(1-pow(-1,lm))/8. + l+ 1 + lm/2.
elif spin == 1:
# The following was caught by Ed Montgomery, 1/16/00. Note
# the different sign from the previous expression.
#k = w*(w+2)*(2*w-1)/24. + (1-pow(-1,w))/16. + l*(m+n) + m
k = w*(w+2)*(2*w-1)/24. - (1-pow(-1,w))/16. + l*(m+n) + m
else:
print "kval: Error -- unknown spin"
sys.exit()
# k should now be an integer
k = int(k)
return k
def korder(wmax,spin):
# Return a list of tuples with (l,m,n) where:
# l Exponent of s term
# m Exponent of t term
# n Exponent of u term
klist = []
if spin == 0:
for w in range(wmax):
for l in range(w+1):
for m in range(w+1):
n = w-l-m
if n>=0 and l<=m:
klist.append((l,m,n))
elif spin == 1:
for w in range(wmax):
for n in range(w):
for m in range(w+1):
l = w-n-m
if 0<=l<m:
klist.append((l,m,n))
else:
print "korder: ERROR improper spin state"
sys.exit()
return klist
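# Small sanity check (illustrative): korder(2, 0) enumerates the singlet
# basis tuples with l+m+n < 2 and l <= m, giving
# [(0, 0, 0), (0, 0, 1), (0, 1, 0)]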
def hterm(l,m,n,l2,m2,n2):
# Obtain the Pekeris Hamiltonian:
# Hc = ScE
# H = a*Z + b
# S = c
delta = l2-l, m2-m, n2-n
if delta == (2,0,0):
x = 4*(l+1)*(l+2)
a = -x
b = 0
c = x*(1+m+n)
elif delta == (0,2,0):
x = 4*(m+1)*(m+2)
a = -x
b = 0
c = x*(1+l+n)
elif delta == (1,1,0):
x = 4*(l+1)*(m+1)
a = -2*x
b = x
c = x*(2+l+m)
elif delta == (1,0,1):
x = 2*(l+1)*(n+1)
a = -2*x
b = x
c = x*(2+2*m+n)
elif delta == (0,1,1):
x = 2*(m+1)*(n+1)
a = -2*x
b = x
c = x*(2+2*l+n)
elif delta == (0,0,2):
x = (n+1)*(n+2)
a = 0
b = x
c = 0
elif delta == (1,0,0):
x = l+1
a = 4*x*(4*l+4*m+2*n+7)
b = x*(-8*m-4*n-6)
c = -2*x*((m+n)*(4*m+12*l)+n*n+12*l+18*m+15*n+14)
elif delta == (0,1,0):
x = m+1
a = 4*x*(4*l+4*m+2*n+7)
b = x*(-8*l-4*n-6)
c = -2*x*((l+n)*(4*l+12*m)+n*n+12*m+18*l+15*n+14)
elif delta == (0,0,1):
x = 4*(n+1)
a = x*(2*l+2*m+2)
b = x*(-l-m-n-2)
c = -x*(-l*l-m*m+4*l*m+2*l*n+2*n*m+3*l+3*m+2*n+2)
elif delta == (0,2,-1):
x = 4*(m+1)*(m+2)*n
a = 0
b = 0
c = x
elif delta == (2,0,-1):
x = 4*(l+1)*(l+2)*n
a = 0
b = 0
c = x
elif delta == (-1,0,2):
x = 2*l*(n+1)*(n+2)
a = 0
b = 0
c = x
elif delta == (0,-1,2):
x = 2*m*(n+1)*(n+2)
a = 0
b = 0
c = x
elif delta == (0,0,0):
a = -4*((l+m)*(6*l+6*m+4*n+12)-4*l*m+4*n+8)
b = 4*(2*l+1)*(2*m+1)+4*(2*n+1)*(l+m+1)+6*n*n+6*n+2
c = 4*((l+m)*(10*l*m+10*m*n+10*l*n+10*l+10*m+18*n+4*n*n+16) +\
l*m*(4-12*n)+8+12*n+4*n*n)
elif delta == (-1,1,0):
x = 4*l*(m+1)
a = -2*x
b = x
c = x*(1+l+m)
elif delta == (1,-1,0):
x = 4*(l+1)*m
a = -2*x
b = x
c = x*(1+l+m)
elif delta == (-1,0,1):
x = 2*l*(n+1)
a = -2*x
b = x
c = x*(2*m-4*l-n)
elif delta == (0,-1,1):
x = 2*m*(n+1)
a = -2*x
b = x
c = x*(2*l-4*m-n)
elif delta == (1,0,-1):
x = 2*(l+1)*n
a = -2*x
b = x
c = x*(2*m-4*l-n-3)
elif delta == (0,1,-1):
x = 2*(m+1)*n
a = -2*x
b = x
c = x*(2*l-4*m-n-3)
elif delta == (-1,0,0):
x = 2*l
a = x*(8*l+8*m+4*n+6)
b = -x*(4*m+2*n+3)
c = -x*((m+n+1)*(12*l+4*m+2)+n+n*n)
elif delta == (0,-1,0):
x = 2*m
a = x*(8*l+8*m+4*n+6)
b = -x*(4*l+2*n+3)
c = -x*((l+n+1)*(12*m+4*l+2)+n+n*n)
elif delta == (0,0,-1):
x = 4*n
a = x*(2*l+2*m+2)
b = -x*(l+m+n+1)
c = -x*((l+m)*(1+2*n-l-m)+6*l*m+2*n)
elif delta == (1,0,-2):
x = 2*n*(n-1)*(l+1)
a = 0
b = 0
c = x
elif delta == (0,1,-2):
x = 2*n*(n-1)*(m+1)
a = 0
b = 0
c = x
elif delta == (-2,0,1):
x = 4*l*(l-1)*(n+1)
a = 0
b = 0
c = x
elif delta == (0,-2,1):
x = 4*m*(m-1)*(n+1)
a = 0
b = 0
c = x
elif delta == (-2,0,0):
x = 4*l*(l-1)
a = -x
b = 0
c = x*(1+m+n)
elif delta == (0,-2,0):
x = 4*m*(m-1)
a = -x
b = 0
c = x*(1+l+n)
elif delta == (0,0,-2):
x = n*(n-1)
a = 0
b = x
c = 0
elif delta == (-1,-1,0):
x = 4*l*m
a = -2*x
b = x
c = x*(l+m)
elif delta == (-1,0,-1):
x = 2*l*n
a = -2*x
b = x
c = x*(2*m+n+1)
elif delta == (0,-1,-1):
x = 2*m*n
a = -2*x
b = x
c = x*(2*l+n+1)
else:
a = 0.
b = 0.
c = 0.
return (a,b,c)
def pekeris(Z,wmax,spin):
# Return Pekeris H and S of order n with nuclear charge Z
# write H = a*Z + b
klist = korder(wmax,spin)
N = len(klist)
H = np.zeros((N,N),dtype=float)
S = np.zeros((N,N),dtype=float)
for index1 in range(N):
l,m,n = klist[index1]
k = kval(l,m,n,spin)
for index2 in range(N):
l2,m2,n2 = klist[index2]
k2 = kval(l2,m2,n2,spin)
i = k-1
j = k2-1
a,b,c = hterm(l,m,n,l2,m2,n2)
if l == m and l2 == m2:
a = 0.5*a
b = 0.5*b
c = 0.5*c
elif l == m or l2 == m2:
pass #do nothing here
elif spin == 1:
a2,b2,c2 = hterm(m,l,n,l2,m2,n2)
a = a - a2
b = b - b2
c = c - c2
elif spin == 0:
a2,b2,c2 = hterm(m,l,n,l2,m2,n2)
a = a + a2
b = b + b2
c = c + c2
else:
print "pekeris: ERROR should not be here"
sys.exit()
H[i,j] = a*Z + b
S[i,j] = c
return (H,S)
def transform(A,B):
    # Similarity transformation: returns (B+)AB
    return np.dot(B.T, np.dot(A, B))
def inv_sqrt(M):
    # Returns the inverse square root of a symmetric matrix;
    # columns of V are eigenvectors, so M^(-1/2) = V D^(-1/2) V+
    E,V = np.linalg.eigh(M)
    return np.dot(V, np.dot(np.diag(1./np.sqrt(E)), V.T))
def two_electron_solve(atomic_number,maximum_order,spin):
Z = atomic_number
H,S = pekeris(Z,maximum_order,spin)
E,V = geigh(H,S)
epsilon = E[0]
E2 = [-Ei**2 for Ei in E]
#print "Energy (h) for order %d: %15.12f %15.12f" % (len(E),E2[0],epsilon)
return E2[0]
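# geigh solves the generalized symmetric eigenproblem H c = E S c; a rough
# scipy-based equivalent would be (sketch only, assuming S is positive
# definite -- not the pyquante2 implementation itself):
#
#     from scipy.linalg import eigh
#     E, V = eigh(H, S)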
# Main program starts here:
if __name__ == '__main__':
import doctest; doctest.testmod()
|
Konjkov/pyquante2
|
pyquante2/hylleraas/helios.py
|
Python
|
bsd-3-clause
| 8,997
|
# Events.py
# Copyright (C) 2010 Daniel Callander
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License.
import DeeIRC.Events as Events
# ------------------------------------------------------------------------------
class ConnectedEvent(Events.ConnectedEvent):
def fire(self, bot):
"""Joins the configured channels on connect."""
for channel in bot.config["channels"]:
bot.sendJoin(channel)
# ------------------------------------------------------------------------------
class MessageEvent(Events.MessageEvent):
def fire(self, bot, nick, target, message):
"""Checks if a command has been said and run it if so."""
        if message and message[0] == bot.config["command_prefix"]:
# Get the command from the message.
trigger_end = message.find(" ")
            if trigger_end < 0:
# No parameters.
trigger_end = len(message)
trigger = message[1:trigger_end].lower()
# If the command exists, run the function.
plugin_name = bot.findPluginFromTrigger(trigger)
if plugin_name:
plugin = bot.getPlugin(plugin_name)
plugin.runCommand(bot, trigger, nick, target, message[trigger_end+1:])
if bot.debug:
bot.log("Command(" + plugin_name + ":" + trigger + "): (" + nick + ", " + target + ", " + message +")")
else:
# Run commands which do not rely on an actual trigger.
for plugin_name in bot.plugins:
plugin = bot.getPlugin(plugin_name)
if plugin.hasCommand("*"):
plugin.runCommand(bot, "*", nick, target, message)
if bot.debug:
bot.log("Command(" + plugin + ":*): (" + nick + ", " + target + ", " + message +")")
pass
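# ------------------------------------------------------------------------------
# Trigger parsing, by example (illustrative values; assumes command_prefix is
# "!"): for message == "!echo hello world", trigger_end == 5, trigger ==
# "echo", and the plugin receives "hello world" via message[trigger_end+1:].
# ------------------------------------------------------------------------------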
|
arif25169/deebot
|
Events.py
|
Python
|
gpl-3.0
| 2,172
|
"""
REST API users model resource.
.. moduleauthor:: Martijn Vermaat <martijn@vermaat.name>
.. Licensed under the MIT license, see the LICENSE file.
"""
from flask import abort, g
from ...models import User, USER_ROLES
from ..errors import ValidationError
from ..security import is_user, has_role, require_basic_auth
from .base import ModelResource
class UsersResource(ModelResource):
"""
User resources model API users and their permissions.
"""
model = User
instance_name = 'user'
instance_type = 'user'
views = ['list', 'get', 'add', 'edit', 'delete']
orderable = ['name', 'added']
get_ensure_conditions = [has_role('admin'), is_user]
get_ensure_options = {'satisfy': any}
# Todo: I think we can lose the 'safe' constraint.
add_schema = {'login': {'type': 'string', 'minlength': 3, 'maxlength': 40,
'safe': True, 'required': True},
'name': {'type': 'string', 'maxlength': 200},
'password': {'type': 'string', 'required': True, 'maxlength': 500},
'email': {'type': 'string', 'maxlength': 200},
'roles': {'type': 'list', 'allowed': USER_ROLES}}
edit_ensure_conditions = [has_role('admin'), is_user]
edit_ensure_options = {'satisfy': any}
edit_schema = {'name': {'type': 'string', 'maxlength': 200},
'password': {'type': 'string', 'maxlength': 500},
'email': {'type': 'string', 'maxlength': 200},
'roles': {'type': 'list', 'allowed': USER_ROLES}}
delete_ensure_conditions = [has_role('admin'), is_user]
delete_ensure_options = {'satisfy': any}
@classmethod
def serialize(cls, instance, embed=None):
"""
A user is represented as an object with the following fields:
**uri** (`uri`)
URI for this resource.
**added** (`string`)
Date and time this user was added, see :ref:`api-datetime`.
**email** (`string`)
User e-mail address.
**login** (`string`)
Login name used for authentication.
**name** (`string`)
Human readable user name.
**roles** (`list` of `string`)
Roles for this user. Possible values for this field are `admin`,
`importer`, `annotator`, and `trader`.
"""
serialization = super(UsersResource, cls).serialize(instance, embed=embed)
serialization.update(name=instance.name,
login=instance.login,
email=instance.email,
roles=list(instance.roles),
added=str(instance.added.isoformat()))
return serialization
@classmethod
def list_view(cls, *args, **kwargs):
"""
Returns a collection of users in the `user_collection` field.
.. note:: Requires having the `admin` role.
**Orderable by:** `name`, `added`
"""
return super(UsersResource, cls).list_view(*args, **kwargs)
@classmethod
def get_view(cls, *args, **kwargs):
"""
Returns the user representation in the `user` field.
.. note:: Requires having the `admin` role or being the requested
user.
"""
return super(UsersResource, cls).get_view(*args, **kwargs)
@classmethod
@require_basic_auth
def add_view(cls, *args, **kwargs):
"""
Adds a user resource.
.. note:: Requires having the `admin` role.
.. note:: This request is only allowed using :ref:`HTTP Basic
Authentication <api-authentication-basic>`, not token
authentication.
**Required request data:**
- **login** (`string`)
- **password** (`string`)
**Accepted request data:**
- **name** (`string`)
- **email** (`string`)
- **roles** (`list` of `string`)
"""
login = kwargs.get('login')
kwargs['name'] = kwargs.get('name', login)
if User.query.filter_by(login=login).first() is not None:
raise ValidationError('User login is not unique')
return super(UsersResource, cls).add_view(*args, **kwargs)
# Todo: Document that all fields are optional.
@classmethod
@require_basic_auth
def edit_view(cls, *args, **kwargs):
"""
Updates a user resource.
.. note:: Requires having the `admin` role or being the requested
user.
.. note:: This request is only allowed using :ref:`HTTP Basic
Authentication <api-authentication-basic>`, not token
authentication.
**Accepted request data:**
- **email** (`string`)
- **login** (`string`)
- **name** (`string`)
- **roles** (`list` of `string`)
"""
if 'roles' in kwargs and 'admin' not in g.user.roles:
# Of course we don't allow any user to change their own roles,
# only admins can do that.
abort(403)
return super(UsersResource, cls).edit_view(*args, **kwargs)
@classmethod
@require_basic_auth
def delete_view(cls, *args, **kwargs):
"""
Todo: documentation, including how/if we cascade.
.. note:: This request is only allowed using :ref:`HTTP Basic
Authentication <api-authentication-basic>`, not token
authentication.
.. todo:: Document that we cascade the delete to tokens, but not to
samples and data sources.
"""
return super(UsersResource, cls).delete_view(*args, **kwargs)
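# Example add_view payload under the schema above (illustrative values only;
# a sketch for documentation, not used by the resource):
#
# {"login": "alice",
#  "password": "s3cret",
#  "name": "Alice Example",
#  "email": "alice@example.com",
#  "roles": ["annotator"]}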
|
sndrtj/varda
|
varda/api/resources/users.py
|
Python
|
mit
| 5,644
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 08:17:35 2013
Exploratory Data Analysis for Kiva Data
@author: Matthew Crowson
"""
import pymongo
from utils import mongodb_proxy
import logging
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats.stats import pearsonr, f_oneway
import multiprocessing as mp
import numpy as np
import os
import math
#Script Logging
LEVEL = logging.DEBUG
FORMAT = logging.Formatter('%(asctime)-15s Line %(lineno)s %(name)s %(levelname)-8s %(message)s')
log = logging.getLogger(__name__)
log.setLevel(LEVEL)
fhandler = logging.FileHandler('eda.log')
shandler = logging.StreamHandler()
fhandler.setFormatter(FORMAT)
shandler.setFormatter(FORMAT)
log.addHandler(fhandler)
log.addHandler(shandler)
log.info('Starting EDA Script')
class mongo_connection():
'''Creates an instance of a connection to the mongo DB'''
def __init__(self):
self._uri = 'mongodb://app:3knvak3ijs@localhost/kiva'
try:
self.client = mongodb_proxy.MongoProxy(pymongo.MongoClient(self._uri))
self.db = self.client['kiva']
        except Exception:
            log.error('Could not establish a connection to Mongo Client')
def plt_distribution(var):
path = './figs/distributions/%s.png' % var
if os.path.isfile(path):
return # File already exists
black_list = ['use', 'activity']
if var in black_list or 'description' in var:
return # Don't try to plot text variables
try:
lower_bound = int(math.floor(min([del_loans[var].min(), def_loans[var].min()])))
upper_bound = int(math.ceil(max([del_loans[var].max(), def_loans[var].max()])))
binwidth = int(math.ceil((upper_bound - lower_bound)/20))
binwidth = 1 if binwidth == 0 else binwidth
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1,0.75, 0.75])
if del_loans[var].dtype.name == 'float64' or del_loans[var].dtype.name == 'int64':
fig = del_loans[var].hist(alpha=.5,
color='green',
bins=xrange(lower_bound, upper_bound+binwidth, binwidth),
weights=np.zeros_like(del_loans[var]) + 1. / del_loans[var].size,
label='Repaid')
if var != 'dollar_days_late_metric':
def_loans[var].hist(alpha=.5,
color='red',
bins=xrange(lower_bound, upper_bound+binwidth, binwidth),
weights=np.zeros_like(def_loans[var]) + 1. / def_loans[var].size,
label='Defaulted')
if del_loans[var].dtype.name == 'object':
fig = del_loans[var].plot(kind='bar',
alpha=.5,
color='green',
bins=xrange(lower_bound, upper_bound+binwidth, binwidth),
weights=np.zeros_like(del_loans[var]) + 1. / del_loans[var].size,
label='Repaid')
if var != 'dollar_days_late_metric':
def_loans[var].plot(kind='bar',
alpha=.5,
color='red',
bins=xrange(lower_bound, upper_bound+binwidth, binwidth),
weights=np.zeros_like(def_loans[var]) + 1. / def_loans[var].size,
label='Defaulted')
mu = np.average(del_loans[var])
sigma = np.std(del_loans[var])
        textstr = 'Repaid\n$\\mu=%.3f$\n$\\sigma=%.3f$' % (mu, sigma)
props = dict(boxstyle='round', facecolor='#336600', alpha=0.5)
ax.text(1.02, 0.95, textstr, fontsize=14, transform=ax.transAxes,
verticalalignment='top', bbox=props)
plt.axvline(x=mu, color='#336600', linewidth=3.0)
plt.axvline(x=mu - sigma, color='#336600', linewidth=1.0, alpha=.5)
plt.axvline(x=mu + sigma, color='#336600', linewidth=1.0, alpha=.5)
mu = np.average(def_loans[var])
sigma = np.std(def_loans[var])
ignore_default = ['dollar_days_late_metric', 'actual_days_to_pay']
if var not in ignore_default:
            textstr = 'Defaulted\n$\\mu=%.3f$\n$\\sigma=%.3f$' % (mu, sigma)
props = dict(boxstyle='round', facecolor='#990000', alpha=0.5)
ax.text(1.02, 0.72, textstr, fontsize=14, transform=ax.transAxes,
verticalalignment='top', bbox=props)
plt.axvline(x=mu, color='#990000', linewidth=3.0)
plt.axvline(x=mu-sigma, color='#990000', linewidth=1.0, alpha=.5)
plt.axvline(x=mu+sigma, color='#990000', linewidth=1.0, alpha=.5)
#One Way ANOVA Between Defaulted and Repaid
f_val, p_val = f_oneway(del_loans[var], def_loans[var])
textstr = 'ANOVA\np=%.3f'%(p_val)
props = dict(boxstyle='round', facecolor='white')
ax.text(1.02, 0.5, textstr, fontsize=14, transform=ax.transAxes,
verticalalignment='top', bbox=props)
plt.title('%s Distribution' % ' '.join([s.capitalize() for s in var.split('_')]))
plt.grid(False)
path = './figs/distributions/%s.png' % var
fig.get_figure().savefig(path)
except Exception as e:
log.error('Could not make a dist plot for %(var)s because of %(e)s' % {'var': var, 'e': e})
def plt_sigvar_regression(var):
var = var['var_name']
path = './figs/y_scatter/delinquent/ddl_scatter_%s.png' % var
if os.path.isfile(path):
return # File already exists
try:
plt.figure()
fig = plt.scatter(del_loans[var], del_loans['dollar_days_late_metric'], alpha=.2)
plt.xlabel(' '.join([s.capitalize() for s in var.split('_')]))
plt.ylabel('Dollar Days Late')
plt.title('%s Delinquency Scatter Plot' % ' '.join([s.capitalize() for s in var.split('_')]))
path = './figs/y_scatter/delinquent/ddl_scatter_%s.png' % var
fig.get_figure().savefig(path)
except:
log.error('Could not make a scatter plot with %s' % var)
return
def plt_sigvar_classifier(var):
var = var['var_name']
path = './figs/y_scatter/defaulted/def_scatter_%s.png' % var
if os.path.isfile(path):
return # File already exists
try:
plt.figure()
fig = plt.scatter(loans[var], loans['defaulted'], alpha=.2)
plt.xlabel(' '.join([s.capitalize() for s in var.split('_')]))
plt.ylabel('Defaulted')
plt.title('%s Defaulted Scatter Plot' % ' '.join([s.capitalize() for s in var.split('_')]))
path = './figs/y_scatter/defaulted/def_scatter_%s.png' % var
fig.get_figure().savefig(path)
except:
log.error('Could not make a scatter plot with %s' % var)
return
if __name__ == '__main__':
mongo_conn = mongo_connection()
kiva_db = mongo_conn.db
flat_loan_collection = kiva_db['flat_loans']
#Make directories for saving images
directories = ['./figs','./figs/distributions',
'./figs/y_scatter',
'./figs/y_scatter/defaulted',
'./figs/y_scatter/delinquent']
for folder in directories:
        if os.path.isdir(os.path.join(os.getcwd(), folder)) is False:
            os.mkdir(os.path.join(os.getcwd(), folder))
obs_count = flat_loan_collection.find().count()
def_count = flat_loan_collection.find({'defaulted':1}).count()
del_count = obs_count - def_count
    log.debug('Of the %(obs)i loans, %(def)i defaulted and %(del)i were repaid or delinquent' %
              {'obs': obs_count, 'def': def_count, 'del': del_count})
cursor = flat_loan_collection.find()
loans = list(cursor)
loans = pd.DataFrame(loans)
loans.fillna(value=0, inplace=True)
#Remove variables that are populated during the life of the loan or are unhelpful
remove_vars = ['translator_byline',
'translator_image',
'video_title',
'video_id',
'image_template_id',
'image_id',
'video_thumbnailImageId',
'video_youtubeId']
[loans.drop(var, 1, inplace=True) for var in remove_vars]
log.info(loans.describe())
del_loans = loans.ix[loans['defaulted'] == 0]
def_loans = loans.ix[loans['defaulted'] == 1]
pool = mp.Pool()
pool.map(plt_distribution, loans.keys())
pool.close()
pool.join()
pool.terminate()
#Arrears distribution by the life of the loan
fig = plt.figure()
del_deciles = ['delinquency_decile_%s' % i for i in xrange(1, 11)]
del_loans.boxplot(column=del_deciles)
plt.title('Arrears Distribution by Life of Loan')
plt.xlabel('Life of Loan')
plt.ylabel('Pct. of Loan Value in Arrears')
plt.xticks(xrange(1, 11), [' '.join([str(i), '%']) for i in xrange(10, 101, 10)])
plt.ylim(-1, 1.05)
fig.savefig('./figs/del_deciles.png')
#Find out which variables have a significant correlation with dollar days late. Make scatter plots
keys = list()
black_list = ['_id',
'activity',
'use']
for k in del_loans.keys():
if k not in black_list and 'description' not in k:
try:
keys.append({'var_name': k, 'pearson': pearsonr(del_loans['dollar_days_late_metric'], del_loans[k])})
except:
log.debug('Could not calculate Pearson R for dollar days late and %s' %k)
sig_keys = [k for k in keys if k['pearson'][1] < 0.05]
log.info('Delinquency Significant Keys')
log.info(sig_keys)
map(plt_sigvar_regression, sig_keys)
#Analyze Defaulted Loans with scatter plots as well
keys = list()
for k in loans.keys():
if k not in black_list and 'description' not in k:
try:
keys.append({'var_name': k, 'pearson': pearsonr(loans['defaulted'], loans[k])})
except:
log.debug('Could not calculate Pearson R for defaulted and %s' % k)
sig_keys = [k for k in keys if k['pearson'][1] < 0.05]
log.info('Default Significant Keys')
log.info(sig_keys)
map(plt_sigvar_classifier, sig_keys)
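    # Pearson-significance filtering, by example (toy values, not Kiva data):
    #
    #   toy = pd.DataFrame({'defaulted': [0, 1, 0, 1], 'x': [0.1, 0.9, 0.2, 0.8]})
    #   r, p = pearsonr(toy['defaulted'], toy['x'])
    #   if p < 0.05:
    #       print 'x correlates significantly with default'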
|
mcrowson/predict-kiva
|
EDA.py
|
Python
|
apache-2.0
| 10,674
|
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.gu.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"લોકશાહી એ સરકારનું એક એવું તંત્ર છે જ્યાં નાગરિકો મત દ્વારા સત્તાનો ઉપયોગ કરે છે.",
"તે ગુજરાત રાજ્યના ધરમપુર શહેરમાં આવેલું હતું",
"કર્ણદેવ પહેલો સોલંકી વંશનો રાજા હતો",
"તેજપાળને બે પત્ની હતી",
"ગુજરાતમાં ભારતીય જનતા પક્ષનો ઉદય આ સમયગાળા દરમિયાન થયો",
"આંદોલનકારીઓએ ચીમનભાઇ પટેલના રાજીનામાની માંગણી કરી.",
"અહિયાં શું જોડાય છે?",
"મંદિરનો પૂર્વાભિમુખ ભાગ નાના મંડપ સાથે થોડો લંબચોરસ આકારનો છે.",
]
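# A minimal usage sketch (assumes spaCy is installed; spacy.blank("gu")
# builds an empty Gujarati pipeline -- illustrative, not part of the
# examples module proper):
if __name__ == "__main__":
    import spacy
    nlp = spacy.blank("gu")
    for doc in nlp.pipe(sentences):
        print([token.text for token in doc])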
|
spacy-io/spaCy
|
spacy/lang/gu/examples.py
|
Python
|
mit
| 1,215
|
#!/usr/bin/env python
'''
Using Netmiko enter into configuration mode on a network device.
Verify that you are currently in configuration mode.
'''
from getpass import getpass
from netmiko import ConnectHandler
from test_devices import pynet1, pynet2, juniper_srx
def main():
'''
Using Netmiko enter into configuration mode on a network device.
Verify that you are currently in configuration mode.
'''
ip_addr = raw_input("Enter IP address: ")
password = getpass()
for a_dict in (pynet1, pynet2, juniper_srx):
a_dict['ip'] = ip_addr
a_dict['password'] = password
net_connect2 = ConnectHandler(**pynet2)
net_connect2.config_mode()
print "\n>>>>"
print "Checking pynet-rtr2 is in configuration mode."
print "Config mode check: {}".format(net_connect2.check_config_mode())
print "Current prompt: {}".format(net_connect2.find_prompt())
print ">>>>\n"
if __name__ == "__main__":
main()
|
nonemaw/pynet
|
pyth_ans_ecourse/class4/ex5_netmiko.py
|
Python
|
gpl-2.0
| 965
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2016 Red Hat, Inc.
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import calendar
import datetime
import decimal
import iso8601
from oslo_db.sqlalchemy import models
import six
import sqlalchemy
from sqlalchemy.dialects import mysql
from sqlalchemy.ext import declarative
from sqlalchemy import types
import sqlalchemy_utils
from gnocchi import archive_policy
from gnocchi import indexer
from gnocchi import resource_type
from gnocchi import storage
from gnocchi import utils
Base = declarative.declarative_base()
COMMON_TABLES_ARGS = {'mysql_charset': "utf8",
'mysql_engine': "InnoDB"}
class PreciseTimestamp(types.TypeDecorator):
"""Represents a timestamp precise to the microsecond.
Deprecated in favor of TimestampUTC.
Still used in alembic migrations.
"""
impl = sqlalchemy.DateTime
@staticmethod
def _decimal_to_dt(dec):
"""Return a datetime from Decimal unixtime format."""
if dec is None:
return None
integer = int(dec)
micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(1000000)
daittyme = datetime.datetime.utcfromtimestamp(integer)
return daittyme.replace(microsecond=int(round(micro)))
@staticmethod
def _dt_to_decimal(utc):
"""Datetime to Decimal.
Some databases don't store microseconds in datetime
so we always store as Decimal unixtime.
"""
if utc is None:
return None
decimal.getcontext().prec = 30
return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) +
(decimal.Decimal(str(utc.microsecond)) /
decimal.Decimal("1000000.0")))
def load_dialect_impl(self, dialect):
if dialect.name == 'mysql':
return dialect.type_descriptor(
types.DECIMAL(precision=20,
scale=6,
asdecimal=True))
return dialect.type_descriptor(self.impl)
def compare_against_backend(self, dialect, conn_type):
if dialect.name == 'mysql':
return issubclass(type(conn_type), types.DECIMAL)
return issubclass(type(conn_type), type(self.impl))
def process_bind_param(self, value, dialect):
if value is not None:
value = utils.normalize_time(value)
if dialect.name == 'mysql':
return self._dt_to_decimal(value)
return value
def process_result_value(self, value, dialect):
if dialect.name == 'mysql':
value = self._decimal_to_dt(value)
if value is not None:
return utils.normalize_time(value).replace(
tzinfo=iso8601.iso8601.UTC)
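    # Round-trip sketch for the Decimal storage above (illustrative): the
    # datetime 1970-01-01 00:00:01.500000 maps to Decimal('1.5') via
    # _dt_to_decimal, and _decimal_to_dt(Decimal('1.5')) restores the same
    # microsecond-precise datetime.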
class TimestampUTC(types.TypeDecorator):
"""Represents a timestamp precise to the microsecond."""
impl = sqlalchemy.DateTime
def load_dialect_impl(self, dialect):
if dialect.name == 'mysql':
return dialect.type_descriptor(mysql.DATETIME(fsp=6))
return self.impl
def process_bind_param(self, value, dialect):
if value is not None:
return utils.normalize_time(value)
def process_result_value(self, value, dialect):
if value is not None:
return value.replace(tzinfo=iso8601.iso8601.UTC)
class GnocchiBase(models.ModelBase):
__table_args__ = (
COMMON_TABLES_ARGS,
)
class ArchivePolicyDefinitionType(sqlalchemy_utils.JSONType):
def process_bind_param(self, value, dialect):
if value is not None:
return super(
ArchivePolicyDefinitionType, self).process_bind_param(
[v.serialize() for v in value],
dialect)
def process_result_value(self, value, dialect):
values = super(ArchivePolicyDefinitionType,
self).process_result_value(value, dialect)
return [archive_policy.ArchivePolicyItem(**v) for v in values]
class SetType(sqlalchemy_utils.JSONType):
def process_result_value(self, value, dialect):
return set(super(SetType,
self).process_result_value(value, dialect))
class ArchivePolicy(Base, GnocchiBase, archive_policy.ArchivePolicy):
__tablename__ = 'archive_policy'
name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)
back_window = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
definition = sqlalchemy.Column(ArchivePolicyDefinitionType, nullable=False)
# TODO(jd) Use an array of string instead, PostgreSQL can do that
aggregation_methods = sqlalchemy.Column(SetType,
nullable=False)
class Metric(Base, GnocchiBase, storage.Metric):
__tablename__ = 'metric'
__table_args__ = (
sqlalchemy.Index('ix_metric_status', 'status'),
sqlalchemy.UniqueConstraint("resource_id", "name",
name="uniq_metric0resource_id0name"),
COMMON_TABLES_ARGS,
)
id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
primary_key=True)
archive_policy_name = sqlalchemy.Column(
sqlalchemy.String(255),
sqlalchemy.ForeignKey(
'archive_policy.name',
ondelete="RESTRICT",
name="fk_metric_ap_name_ap_name"),
nullable=False)
archive_policy = sqlalchemy.orm.relationship(ArchivePolicy, lazy="joined")
creator = sqlalchemy.Column(sqlalchemy.String(255))
resource_id = sqlalchemy.Column(
sqlalchemy_utils.UUIDType(),
sqlalchemy.ForeignKey('resource.id',
ondelete="SET NULL",
name="fk_metric_resource_id_resource_id"))
name = sqlalchemy.Column(sqlalchemy.String(255))
unit = sqlalchemy.Column(sqlalchemy.String(31))
status = sqlalchemy.Column(sqlalchemy.Enum('active', 'delete',
name="metric_status_enum"),
nullable=False,
server_default='active')
def jsonify(self):
d = {
"id": self.id,
"creator": self.creator,
"name": self.name,
"unit": self.unit,
}
unloaded = sqlalchemy.inspect(self).unloaded
if 'resource' in unloaded:
d['resource_id'] = self.resource_id
else:
d['resource'] = self.resource
if 'archive_policy' in unloaded:
d['archive_policy_name'] = self.archive_policy_name
else:
d['archive_policy'] = self.archive_policy
if self.creator is None:
d['created_by_user_id'] = d['created_by_project_id'] = None
else:
d['created_by_user_id'], _, d['created_by_project_id'] = (
self.creator.partition(":")
)
return d
def __eq__(self, other):
# NOTE(jd) If `other` is a SQL Metric, we only compare
# archive_policy_name, and we don't compare archive_policy that might
# not be loaded. Otherwise we fallback to the original comparison for
# storage.Metric.
return ((isinstance(other, Metric)
and self.id == other.id
and self.archive_policy_name == other.archive_policy_name
and self.creator == other.creator
and self.name == other.name
and self.unit == other.unit
and self.resource_id == other.resource_id)
or (storage.Metric.__eq__(self, other)))
__hash__ = storage.Metric.__hash__
RESOURCE_TYPE_SCHEMA_MANAGER = resource_type.ResourceTypeSchemaManager(
"gnocchi.indexer.sqlalchemy.resource_type_attribute")
class ResourceTypeAttributes(sqlalchemy_utils.JSONType):
def process_bind_param(self, attributes, dialect):
return super(ResourceTypeAttributes, self).process_bind_param(
attributes.jsonify(), dialect)
def process_result_value(self, value, dialect):
attributes = super(ResourceTypeAttributes, self).process_result_value(
value, dialect)
return RESOURCE_TYPE_SCHEMA_MANAGER.attributes_from_dict(attributes)
class ResourceType(Base, GnocchiBase, resource_type.ResourceType):
__tablename__ = 'resource_type'
__table_args__ = (
sqlalchemy.UniqueConstraint("tablename",
name="uniq_resource_type0tablename"),
COMMON_TABLES_ARGS,
)
name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True,
nullable=False)
tablename = sqlalchemy.Column(sqlalchemy.String(35), nullable=False)
attributes = sqlalchemy.Column(ResourceTypeAttributes)
state = sqlalchemy.Column(sqlalchemy.Enum("active", "creating",
"creation_error", "deleting",
"deletion_error", "updating",
"updating_error",
name="resource_type_state_enum"),
nullable=False,
server_default="creating")
updated_at = sqlalchemy.Column(TimestampUTC, nullable=False,
# NOTE(jd): We would like to use
# sqlalchemy.func.now, but we can't
# because the type of PreciseTimestamp in
# MySQL is not a Timestamp, so it would
# not store a timestamp but a date as an
# integer.
default=lambda: utils.utcnow())
def to_baseclass(self):
cols = {}
for attr in self.attributes:
cols[attr.name] = sqlalchemy.Column(attr.satype,
nullable=not attr.required)
return type(str("%s_base" % self.tablename), (object, ), cols)
class ResourceJsonifier(indexer.Resource):
def jsonify(self):
d = dict(self)
del d['revision']
if 'metrics' not in sqlalchemy.inspect(self).unloaded:
d['metrics'] = dict((m.name, six.text_type(m.id))
for m in self.metrics)
if self.creator is None:
d['created_by_user_id'] = d['created_by_project_id'] = None
else:
d['created_by_user_id'], _, d['created_by_project_id'] = (
self.creator.partition(":")
)
return d
class ResourceMixin(ResourceJsonifier):
@declarative.declared_attr
def __table_args__(cls):
return (sqlalchemy.CheckConstraint('started_at <= ended_at',
name="ck_started_before_ended"),
COMMON_TABLES_ARGS)
@declarative.declared_attr
def type(cls):
return sqlalchemy.Column(
sqlalchemy.String(255),
sqlalchemy.ForeignKey('resource_type.name',
ondelete="RESTRICT",
name="fk_%s_resource_type_name" %
cls.__tablename__),
nullable=False)
creator = sqlalchemy.Column(sqlalchemy.String(255))
started_at = sqlalchemy.Column(TimestampUTC, nullable=False,
default=lambda: utils.utcnow())
revision_start = sqlalchemy.Column(TimestampUTC, nullable=False,
default=lambda: utils.utcnow())
ended_at = sqlalchemy.Column(TimestampUTC)
user_id = sqlalchemy.Column(sqlalchemy.String(255))
project_id = sqlalchemy.Column(sqlalchemy.String(255))
original_resource_id = sqlalchemy.Column(sqlalchemy.String(255),
nullable=False)
class Resource(ResourceMixin, Base, GnocchiBase):
__tablename__ = 'resource'
_extra_keys = ['revision', 'revision_end']
revision = -1
id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
primary_key=True)
revision_end = None
metrics = sqlalchemy.orm.relationship(
Metric, backref="resource",
primaryjoin="and_(Resource.id == Metric.resource_id, "
"Metric.status == 'active')")
def get_metric(self, metric_name):
m = super(Resource, self).get_metric(metric_name)
if m:
if sqlalchemy.orm.session.object_session(self):
# NOTE(jd) The resource is already loaded so that should not
# trigger a SELECT
m.resource
return m
class ResourceHistory(ResourceMixin, Base, GnocchiBase):
__tablename__ = 'resource_history'
revision = sqlalchemy.Column(sqlalchemy.Integer, autoincrement=True,
primary_key=True)
id = sqlalchemy.Column(sqlalchemy_utils.UUIDType(),
sqlalchemy.ForeignKey(
'resource.id',
ondelete="CASCADE",
name="fk_rh_id_resource_id"),
nullable=False)
revision_end = sqlalchemy.Column(TimestampUTC, nullable=False,
default=lambda: utils.utcnow())
metrics = sqlalchemy.orm.relationship(
Metric, primaryjoin="Metric.resource_id == ResourceHistory.id",
foreign_keys='Metric.resource_id')
class ResourceExt(object):
"""Default extension class for plugin
Used for plugin that doesn't need additional columns
"""
class ResourceExtMixin(object):
@declarative.declared_attr
def __table_args__(cls):
return (COMMON_TABLES_ARGS, )
@declarative.declared_attr
def id(cls):
tablename_compact = cls.__tablename__
if tablename_compact.endswith("_history"):
tablename_compact = tablename_compact[:-6]
return sqlalchemy.Column(
sqlalchemy_utils.UUIDType(),
sqlalchemy.ForeignKey(
'resource.id',
ondelete="CASCADE",
name="fk_%s_id_resource_id" % tablename_compact,
                # NOTE(sileht): We use use_alter to ensure that postgresql
                # does not take an AccessExclusiveLock on the destination table
use_alter=True),
primary_key=True
)
class ResourceHistoryExtMixin(object):
@declarative.declared_attr
def __table_args__(cls):
return (COMMON_TABLES_ARGS, )
@declarative.declared_attr
def revision(cls):
tablename_compact = cls.__tablename__
if tablename_compact.endswith("_history"):
tablename_compact = tablename_compact[:-6]
return sqlalchemy.Column(
sqlalchemy.Integer,
sqlalchemy.ForeignKey(
'resource_history.revision',
ondelete="CASCADE",
name="fk_%s_revision_rh_revision"
% tablename_compact,
                # NOTE(sileht): We use use_alter to ensure that postgresql
                # does not take an AccessExclusiveLock on the destination table
use_alter=True),
primary_key=True
)
class HistoryModelIterator(models.ModelIterator):
def __next__(self):
# NOTE(sileht): Our custom resource attribute columns don't
# have the same name in database than in sqlalchemy model
# so remove the additional "f_" for the model name
n = six.advance_iterator(self.i)
model_attr = n[2:] if n[:2] == "f_" else n
return model_attr, getattr(self.model, n)
class ArchivePolicyRule(Base, GnocchiBase):
__tablename__ = 'archive_policy_rule'
name = sqlalchemy.Column(sqlalchemy.String(255), primary_key=True)
archive_policy_name = sqlalchemy.Column(
sqlalchemy.String(255),
sqlalchemy.ForeignKey(
'archive_policy.name',
ondelete="RESTRICT",
name="fk_apr_ap_name_ap_name"),
nullable=False)
metric_pattern = sqlalchemy.Column(sqlalchemy.String(255), nullable=False)
|
leandroreox/gnocchi
|
gnocchi/indexer/sqlalchemy_base.py
|
Python
|
apache-2.0
| 16,758
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.inception_v4."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertTrue(auxlogits.op.name.startswith('InceptionV4/AuxLogits'))
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue(predictions.op.name.startswith(
'InceptionV4/Logits/Predictions'))
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 299, 299
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV4/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1536])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildWithoutAuxLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, endpoints = inception.inception_v4(inputs, num_classes,
create_aux_logits=False)
self.assertFalse('AuxLogits' in endpoints)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testAllEndPointsShapes(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v4(inputs, num_classes)
endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
'Mixed_3a': [batch_size, 73, 73, 160],
'Mixed_4a': [batch_size, 71, 71, 192],
'Mixed_5a': [batch_size, 35, 35, 384],
# 4 x Inception-A blocks
'Mixed_5b': [batch_size, 35, 35, 384],
'Mixed_5c': [batch_size, 35, 35, 384],
'Mixed_5d': [batch_size, 35, 35, 384],
'Mixed_5e': [batch_size, 35, 35, 384],
# Reduction-A block
'Mixed_6a': [batch_size, 17, 17, 1024],
# 7 x Inception-B blocks
'Mixed_6b': [batch_size, 17, 17, 1024],
'Mixed_6c': [batch_size, 17, 17, 1024],
'Mixed_6d': [batch_size, 17, 17, 1024],
'Mixed_6e': [batch_size, 17, 17, 1024],
'Mixed_6f': [batch_size, 17, 17, 1024],
'Mixed_6g': [batch_size, 17, 17, 1024],
'Mixed_6h': [batch_size, 17, 17, 1024],
                        # Reduction-B block
'Mixed_7a': [batch_size, 8, 8, 1536],
# 3 x Inception-C blocks
'Mixed_7b': [batch_size, 8, 8, 1536],
'Mixed_7c': [batch_size, 8, 8, 1536],
'Mixed_7d': [batch_size, 8, 8, 1536],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'global_pool': [batch_size, 1, 1, 1536],
'PreLogitsFlatten': [batch_size, 1536],
'Logits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 299, 299
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v4_base(inputs)
self.assertTrue(net.op.name.startswith(
'InceptionV4/Mixed_7d'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 8, 8, 1536])
expected_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
    for name, op in end_points.items():
self.assertTrue(op.name.startswith('InceptionV4/' + name))
def testBuildOnlyUpToFinalEndpoint(self):
batch_size = 5
height, width = 299, 299
all_endpoints = [
'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'Mixed_3a',
'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d',
'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a',
'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
for index, endpoint in enumerate(all_endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v4_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV4/' + endpoint))
self.assertItemsEqual(all_endpoints[:index+1], end_points.keys())
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_v4(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_v4(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testGlobalPool(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 9, 11, 1536])
def testGlobalPoolUnknownImageShape(self):
batch_size = 1
height, width = 350, 400
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (batch_size, None, None, 3))
logits, end_points = inception.inception_v4(
inputs, num_classes, create_aux_logits=False)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_7d']
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
logits_out, pre_pool_out = sess.run([logits, pre_pool],
{inputs: images.eval()})
self.assertTupleEqual(logits_out.shape, (batch_size, num_classes))
self.assertTupleEqual(pre_pool_out.shape, (batch_size, 9, 11, 1536))
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v4(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v4(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v4(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
if __name__ == '__main__':
tf.test.main()
|
jiaphuan/models
|
research/slim/nets/inception_v4_test.py
|
Python
|
apache-2.0
| 11,999
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "brewmaster.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
hacktobacillus/fermenter
|
manage.py
|
Python
|
mit
| 253
|
# This file is part of Boomer Core.
#
# Boomer Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Boomer Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Boomer Core. If not, see <http://www.gnu.org/licenses/>.
#
# Forked from Mycroft Core on 2017-07-29
import collections
import audioop
from time import sleep
import pyaudio
from speech_recognition import (
Microphone,
AudioSource,
AudioData
)
import speech_recognition
from boomer.configuration import ConfigurationManager
from boomer.util.log import getLogger
listener_config = ConfigurationManager.get().get('listener')
logger = getLogger(__name__)
__author__ = 'seanfitz'
class MutableStream(object):
def __init__(self, wrapped_stream, format, muted=False):
assert wrapped_stream is not None
self.wrapped_stream = wrapped_stream
self.muted = muted
self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH])
def mute(self):
self.muted = True
def unmute(self):
self.muted = False
def read(self, size):
frames = collections.deque()
remaining = size
while remaining > 0:
to_read = min(self.wrapped_stream.get_read_available(), remaining)
if to_read == 0:
sleep(.01)
continue
result = self.wrapped_stream.read(to_read)
frames.append(result)
remaining -= to_read
if self.muted:
return self.muted_buffer
input_latency = self.wrapped_stream.get_input_latency()
if input_latency > 0.2:
logger.warn("High input latency: %f" % input_latency)
audio = b"".join(list(frames))
return audio
def close(self):
self.wrapped_stream.close()
self.wrapped_stream = None
def is_stopped(self):
return self.wrapped_stream.is_stopped()
def stop_stream(self):
return self.wrapped_stream.stop_stream()
class MutableMicrophone(Microphone):
def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024):
Microphone.__init__(
self, device_index=device_index, sample_rate=sample_rate,
chunk_size=chunk_size)
self.muted = False
def __enter__(self):
assert self.stream is None, \
"This audio source is already inside a context manager"
self.audio = pyaudio.PyAudio()
self.stream = MutableStream(self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE,
frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
), self.format, self.muted)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.stream.is_stopped():
self.stream.stop_stream()
self.stream.close()
self.stream = None
self.audio.terminate()
def mute(self):
self.muted = True
if self.stream:
self.stream.mute()
def unmute(self):
self.muted = False
if self.stream:
self.stream.unmute()
class ResponsiveRecognizer(speech_recognition.Recognizer):
# The maximum audio in seconds to keep for transcribing a phrase
# The wake word must fit in this time
SAVED_WW_SEC = 1.0
# Padding of silence when feeding to pocketsphinx
SILENCE_SEC = 0.01
# The minimum seconds of noise before a
# phrase can be considered complete
MIN_LOUD_SEC_PER_PHRASE = 0.1
# The maximum length a phrase can be recorded,
# provided there is noise the entire time
RECORDING_TIMEOUT = 30.0
# The maximum time it will continue to record silence
# when not enough noise has been detected
RECORDING_TIMEOUT_WITH_SILENCE = 3.0
# Time between pocketsphinx checks for the wake word
SEC_BETWEEN_WW_CHECKS = 0.2
def __init__(self, wake_word_recognizer):
speech_recognition.Recognizer.__init__(self)
self.wake_word_recognizer = wake_word_recognizer
self.audio = pyaudio.PyAudio()
self.threshold_multiplier = float(
listener_config.get('threshold_multiplier'))
self.dynamic_energy_ratio = float(
listener_config.get('dynamic_energy_ratio'))
@staticmethod
def record_sound_chunk(source):
return source.stream.read(source.CHUNK)
@staticmethod
def calc_energy(sound_chunk, sample_width):
return audioop.rms(sound_chunk, sample_width)
def wake_word_in_audio(self, frame_data):
hyp = self.wake_word_recognizer.transcribe(frame_data)
return self.wake_word_recognizer.found_wake_word(hyp)
def record_phrase(self, source, sec_per_buffer):
"""
This attempts to record an entire spoken phrase. Essentially,
this waits for a period of silence and then returns the audio
:rtype: bytearray
:param source: AudioSource
:param sec_per_buffer: Based on source.SAMPLE_RATE
:return: bytearray representing the frame_data of the recorded phrase
"""
num_loud_chunks = 0
noise = 0
max_noise = 25
min_noise = 0
def increase_noise(level):
if level < max_noise:
return level + 200 * sec_per_buffer
return level
def decrease_noise(level):
if level > min_noise:
return level - 100 * sec_per_buffer
return level
# Smallest number of loud chunks required to return
min_loud_chunks = int(self.MIN_LOUD_SEC_PER_PHRASE / sec_per_buffer)
# Maximum number of chunks to record before timing out
max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)
num_chunks = 0
        # Will return once this is exceeded, even if there aren't enough loud chunks
max_chunks_of_silence = int(self.RECORDING_TIMEOUT_WITH_SILENCE /
sec_per_buffer)
        # buffer to accumulate audio in, primed with one silent sample
        byte_data = '\0' * source.SAMPLE_WIDTH
phrase_complete = False
while num_chunks < max_chunks and not phrase_complete:
chunk = self.record_sound_chunk(source)
byte_data += chunk
num_chunks += 1
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
test_threshold = self.energy_threshold * self.threshold_multiplier
is_loud = energy > test_threshold
if is_loud:
noise = increase_noise(noise)
num_loud_chunks += 1
else:
noise = decrease_noise(noise)
self.adjust_threshold(energy, sec_per_buffer)
was_loud_enough = num_loud_chunks > min_loud_chunks
quiet_enough = noise <= min_noise
recorded_too_much_silence = num_chunks > max_chunks_of_silence
if quiet_enough and (was_loud_enough or recorded_too_much_silence):
phrase_complete = True
return byte_data
@staticmethod
def sec_to_bytes(sec, source):
return sec * source.SAMPLE_RATE * source.SAMPLE_WIDTH
def wait_until_wake_word(self, source, sec_per_buffer):
num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
source.SAMPLE_WIDTH)
silence = '\0' * num_silent_bytes
        # buffer to accumulate audio in, primed with silence
        byte_data = silence
buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
buffers_since_check = 0.0
# Max bytes for byte_data before audio is removed from the front
max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
said_wake_word = False
while not said_wake_word:
chunk = self.record_sound_chunk(source)
energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
if energy < self.energy_threshold * self.threshold_multiplier:
self.adjust_threshold(energy, sec_per_buffer)
needs_to_grow = len(byte_data) < max_size
if needs_to_grow:
byte_data += chunk
else: # Remove beginning of audio and add new chunk to end
byte_data = byte_data[len(chunk):] + chunk
buffers_since_check += 1.0
            if buffers_since_check > buffers_per_check:
buffers_since_check -= buffers_per_check
said_wake_word = self.wake_word_in_audio(byte_data + silence)
@staticmethod
def create_audio_data(raw_data, source):
"""
Constructs an AudioData instance with the same parameters
as the source and the specified frame_data
"""
return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
def listen(self, source, emitter):
"""
Listens for audio that Boomer should respond to
:param source: an ``AudioSource`` instance for reading from
        :param emitter: a pyee EventEmitter used to signal that the wake
            word has been found
"""
assert isinstance(source, AudioSource), "Source must be an AudioSource"
bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
sec_per_buffer = float(source.CHUNK) / bytes_per_sec
logger.debug("Waiting for wake word...")
self.wait_until_wake_word(source, sec_per_buffer)
logger.debug("Recording...")
emitter.emit("recognizer_loop:record_begin")
frame_data = self.record_phrase(source, sec_per_buffer)
audio_data = self.create_audio_data(frame_data, source)
emitter.emit("recognizer_loop:record_end")
logger.debug("Thinking...")
return audio_data
def adjust_threshold(self, energy, seconds_per_buffer):
if self.dynamic_energy_threshold and energy > 0:
# account for different chunk sizes and rates
damping = (
self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = (
self.energy_threshold * damping +
target_energy * (1 - damping))
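# Worked example of the update above (a sketch with made-up numbers;
# speech_recognition initializes dynamic_energy_adjustment_damping to 0.15 at
# the time of writing): with sec_per_buffer = 0.1 and dynamic_energy_ratio
# = 1.5, a quiet chunk of energy 100 against a current threshold of 300 gives
#     damping       = 0.15 ** 0.1                 ~= 0.827
#     target_energy = 100 * 1.5                   =  150
#     new threshold = 300 * 0.827 + 150 * 0.173   ~= 274
# i.e. the threshold decays exponentially toward energy * dynamic_energy_ratio.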
|
clusterfudge/boomer
|
boomer/client/speech/mic.py
|
Python
|
gpl-3.0
| 10,725
|
#!/usr/bin/env python
import os
from optparse import OptionParser
from xml.dom.minidom import parse
import mediauto.module
STANDARD_CONFIG_FILE_LOCATIONS = [
'/etc/mediauto.conf',
os.path.join(os.getenv('HOME'), '.mediauto/mediauto.conf'),
]
parser = OptionParser()
parser.add_option(
'-c',
'--config',
metavar='FILE',
dest='configfile',
help='path to the configuration file'
)
parser.add_option(
'-d',
'--dvd-destination',
metavar='DIR',
dest='dvd_destination',
help='DVD output directory'
)
parser.add_option(
'-m',
'--module-dir',
metavar='DIR',
dest='extra_module_dir',
help='Additional user module directory.'
)
class ConfigurationException(Exception):
def __init__(self, message): Exception.__init__(self, message)
def boolean_attribute(element, name):
S = element.getAttribute(name)
s = S.lower()
if s == 'true': return True
if s == 'yes': return True
if s == 'false': return False
if s == 'no': return False
raise ConfigurationException(
        'Invalid boolean configuration value "%s" in attribute "%s".' % (S, name)
)
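# Illustration (hypothetical element, not part of the original module):
def _boolean_attribute_example():
    from xml.dom.minidom import parseString
    doc = parseString('<program name="hal" enabled="Yes" path="/usr/bin/hal"/>')
    element = doc.documentElement
    # case-insensitive: true/yes -> True, false/no -> False,
    # anything else raises ConfigurationException
    assert boolean_attribute(element, 'enabled') is True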
class Config:
def __init__(self):
self.programs = {}
self.insertrules = {}
self.variables = {}
moduledirlist = []
configfiles = [ x for x in STANDARD_CONFIG_FILE_LOCATIONS if os.path.isfile(x) ]
options, args = parser.parse_args()
# --config,-c
if options.configfile and os.path.isfile(options.configfile):
configfiles.append(options.configfile)
#print(str(configfiles))
for x in configfiles:
doc = parse(x)
# Programs
programs = doc.getElementsByTagName('program')
for program in programs:
self.set_program(
program.getAttribute('name'),
boolean_attribute(program, 'enabled'),
program.getAttribute('path')
)
# Media insertion rules
for insertrule in doc.getElementsByTagName('insert'):
self.set_insert_rule(
insertrule.getAttribute('media'),
[ x.getAttribute('name') for x in insertrule.getElementsByTagName('process') ]
)
# Variables
for variable in doc.getElementsByTagName('variable'):
self.set_variable(
variable.getAttribute('name'),
variable.getAttribute('value')
)
# Module directories
for moduledir in doc.getElementsByTagName('module-dir'):
path = moduledir.getAttribute('path')
if os.path.isdir(path): moduledirlist.append(path)
# --module-dir,-m
if options.extra_module_dir and os.path.isdir(options.extra_module_dir):
moduledirlist.append(options.extra_module_dir)
# --dvd-destination,-d
if options.dvd_destination:
self.variables['dvd.destination'] = options.dvd_destination
# Find plugins.
mediauto.module.scan_plugins(moduledirlist)
def set_variable(self, name, value):
self.variables[name] = value
def set_insert_rule(self, mediatype, processes):
self.insertrules[mediatype] = (mediatype, processes)
def set_program(self, name, enabled, path):
self.programs[name] = (name, enabled, path)
    def variable(self, name):
        if name not in self.variables: return None
        return self.variables[name]
    def insert_rule(self, mediatype):
        if mediatype not in self.insertrules: return []
        return self.insertrules[mediatype][1]
    def program_enabled(self, name):
        if name not in self.programs: return False
        return self.programs[name][1]
    def program_path(self, name):
        if name not in self.programs: return None
        return self.programs[name][2]
config = Config()
def variable(name):
return config.variable(name)
def insert_rule(mediatype):
return config.insert_rule(mediatype)
def program_enabled(name):
return config.program_enabled(name)
def program_path(name):
return config.program_path(name)
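# Usage sketch (hypothetical program and variable names): other modules are
# expected to go through the helpers above, which delegate to the singleton
# Config built from the standard locations plus command-line overrides, e.g.
#
#   import mediauto.config as config
#   if config.program_enabled('handbrake'):
#       path = config.program_path('handbrake')
#       dest = config.variable('dvd.destination')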
|
dfletcher/Mediauto
|
site-packages/mediauto/config.py
|
Python
|
gpl-2.0
| 3,995
|
from ete3 import PhyloTree, PhylomeDBConnector, SeqGroup
p = PhylomeDBConnector()
w,x, t = p.get_best_tree("Hsa0000001", 1)
a, l = p.get_clean_alg("Hsa0000001", 1)
A = SeqGroup(a, "iphylip")
for s in A.id2seq:
A.id2seq[s]=A.id2seq[s][:30]
t.link_to_alignment(A)
print t.get_species()
print t
t.set_outgroup(t&"Ddi0002240")
sp = PhyloTree("(((((((((((Hsa, Ptr), Mmu), ((Mms, Rno), (Bta, Cfa))), Mdo), Gga), Xtr), (Dre, Fru))),Cin) (Dme, Aga)), Ddi);")
reconciled, evs = t.reconcile(sp)
print reconciled
reconciled.show()
|
karrtikr/ete
|
sdoc/old_tutorial/alignment_visualization.py
|
Python
|
gpl-3.0
| 527
|
#!/usr/bin/env python
# coding: utf-8
"""
@Author: Well
@Date: 2015 - 07 - 12
"""
import urllib
# url encode
print urllib.quote('激战2')
print urllib.unquote("%E6%BF%80%E6%88%982")
|
neiltest/neil_learn_python
|
src/learn_python/python_other/neil_30_urlencode.py
|
Python
|
mit
| 189
|
# ===============================================================================
# Copyright 2017 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from pychron.pipeline.plot.panels.figure_panel import FigurePanel
from pychron.pipeline.plot.plotter.regression_series import RegressionSeries
from pychron.processing.analysis_graph import AnalysisStackedRegressionGraph
class RegressionSeriesPanel(FigurePanel):
_graph_klass = AnalysisStackedRegressionGraph
_figure_klass = RegressionSeries
equi_stack = True
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/pipeline/plot/panels/regression_series_panel.py
|
Python
|
apache-2.0
| 1,208
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
from froi.io.csv import get_cord_from_file
import numpy as np
import os
class RegularROIFromCSVFileDialog(QDialog):
"""A dialog for generate a regular ROI."""
def __init__(self, model, parent=None):
super(RegularROIFromCSVFileDialog, self).__init__(parent)
self._model = model
self._temp_dir = None
self._init_gui()
self._create_actions()
def _init_gui(self):
self.setWindowTitle("Generate Regular ROI based on CSV file")
        cordinate_file_label = QLabel('Coordinate File :')
self._cordinate_file_dir = QLineEdit('')
self._cordinate_file_dir.setReadOnly(True)
self._cordinate_file_button = QPushButton('Browse')
out_label = QLabel("Output")
self.out_edit = QLineEdit()
self._create_output()
self.run_button = QPushButton("Run")
self.cancel_button = QPushButton("Cancel")
grid_layout = QGridLayout()
grid_layout.addWidget(cordinate_file_label, 0, 0)
grid_layout.addWidget(self._cordinate_file_dir, 0, 1)
grid_layout.addWidget(self._cordinate_file_button, 0, 2)
grid_layout.addWidget(out_label, 1, 0)
grid_layout.addWidget(self.out_edit, 1, 1, 1, 2)
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.run_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.run_button.clicked.connect(self._regular_roi)
self._cordinate_file_button.clicked.connect(self._cordinate_file_browse)
self.cancel_button.clicked.connect(self.done)
def _cordinate_file_browse(self):
        cordinate_file_filepath = self._open_file_dialog("Add coordinate txt file.")
if cordinate_file_filepath is not None:
self._temp_dir = os.path.dirname(cordinate_file_filepath)
self._cordinate_file_dir.setText(cordinate_file_filepath)
def _open_file_dialog(self, title):
        if self._temp_dir is None:
temp_dir = QDir.currentPath()
else:
temp_dir = self._temp_dir
        file_name = QFileDialog.getOpenFileName(
            self, title, temp_dir, "Coordinate files (*.txt *.csv)")
import sys
file_path = None
if not file_name.isEmpty():
if sys.platform == 'win32':
file_path = unicode(file_name).encode('gb2312')
else:
file_path = str(file_name)
return file_path
def _update_output_name(self):
row = self._model.currentIndex()
vol_name = self._model.data(row, Qt.DisplayRole)
output_name = '_'.join([str(vol_name), str('sphere'), 'ROI'])
self.out_edit.setText(output_name)
def _create_output(self):
self._update_output_name()
def _regular_roi(self):
out = self.out_edit.text()
cord_filepath = str(self._cordinate_file_dir.text())
if not out:
self.out_edit.setFocus()
return
roi_generater = imtool.sphere_roi
header = self._model.data(self._model.currentIndex(), Qt.UserRole + 11)
image_affine = self._model.get_affine()
data = self._model.data(self._model.currentIndex(), Qt.UserRole + 6)
new_data = np.zeros_like(data)
try:
coord_list, radius_list, id_list = get_cord_from_file(header, cord_filepath, image_affine)
except ValueError, error_info:
            QMessageBox.critical(self, 'Please check the coordinates in the file.', str(error_info))
return
for idx in range(len(coord_list)):
new_data = roi_generater(new_data, coord_list[idx][0],
coord_list[idx][1], coord_list[idx][2],
radius_list[idx], id_list[idx])
self._model.addItem(new_data,
None,
out,
self._model._data[0].get_header(),
None, None, 255, 'rainbow')
self.done(0)
|
liuzhaoguo/FreeROI-1
|
froi/gui/component/regularroifromcsvfiledialog.py
|
Python
|
bsd-3-clause
| 4,533
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import json as json_lib
import pytest
try:
import ujson
except ImportError:
ujson = None
from telegram import TelegramObject
class TestTelegramObject:
def test_to_json_native(self, monkeypatch):
if ujson:
monkeypatch.setattr('ujson.dumps', json_lib.dumps)
# to_json simply takes whatever comes from to_dict, therefore we only need to test it once
telegram_object = TelegramObject()
        # Test that it works with a dict with str keys as well as dicts and lists as values
d = {'str': 'str', 'str2': ['str', 'str'], 'str3': {'str': 'str'}}
monkeypatch.setattr('telegram.TelegramObject.to_dict', lambda _: d)
json = telegram_object.to_json()
        # Order isn't guaranteed
assert '"str": "str"' in json
assert '"str2": ["str", "str"]' in json
assert '"str3": {"str": "str"}' in json
        # Now make sure that it doesn't work with non-JSON-serializable stuff and that it fails loudly
# Tuples aren't allowed as keys in json
d = {('str', 'str'): 'str'}
monkeypatch.setattr('telegram.TelegramObject.to_dict', lambda _: d)
with pytest.raises(TypeError):
telegram_object.to_json()
@pytest.mark.skipif(not ujson, reason='ujson not installed')
def test_to_json_ujson(self, monkeypatch):
# to_json simply takes whatever comes from to_dict, therefore we only need to test it once
telegram_object = TelegramObject()
        # Test that it works with a dict with str keys as well as dicts and lists as values
d = {'str': 'str', 'str2': ['str', 'str'], 'str3': {'str': 'str'}}
monkeypatch.setattr('telegram.TelegramObject.to_dict', lambda _: d)
json = telegram_object.to_json()
        # Order isn't guaranteed and ujson discards whitespace
assert '"str":"str"' in json
assert '"str2":["str","str"]' in json
assert '"str3":{"str":"str"}' in json
# Test that ujson allows tuples
        # NOTE: This could be seen as a bug (since it's different from the stdlib json),
        # but we test it anyway
d = {('str', 'str'): 'str'}
monkeypatch.setattr('telegram.TelegramObject.to_dict', lambda _: d)
telegram_object.to_json()
def test_to_dict_private_attribute(self):
class TelegramObjectSubclass(TelegramObject):
__slots__ = ('a', '_b') # Added slots so that the attrs are converted to dict
def __init__(self):
self.a = 1
self._b = 2
subclass_instance = TelegramObjectSubclass()
assert subclass_instance.to_dict() == {'a': 1}
def test_slot_behaviour(self, recwarn, mro_slots):
inst = TelegramObject()
for attr in inst.__slots__:
assert getattr(inst, attr, 'err') != 'err', f"got extra slot '{attr}'"
assert not inst.__dict__, f"got missing slot(s): {inst.__dict__}"
assert len(mro_slots(inst)) == len(set(mro_slots(inst))), "duplicate slot"
inst.custom = 'should give warning'
assert len(recwarn) == 1 and 'custom' in str(recwarn[0].message), recwarn.list
def test_meaningless_comparison(self, recwarn):
expected_warning = "Objects of type TGO can not be meaningfully tested for equivalence."
class TGO(TelegramObject):
pass
a = TGO()
b = TGO()
assert a == b
assert len(recwarn) == 2
assert str(recwarn[0].message) == expected_warning
assert str(recwarn[1].message) == expected_warning
def test_meaningful_comparison(self, recwarn):
class TGO(TelegramObject):
_id_attrs = (1,)
a = TGO()
b = TGO()
assert a == b
assert len(recwarn) == 0
assert b == a
assert len(recwarn) == 0
|
leandrotoledo/python-telegram-bot
|
tests/test_telegramobject.py
|
Python
|
lgpl-3.0
| 4,625
|
../../../../share/pyshared/chardet/test.py
|
2ndy/RaspIM
|
usr/lib/python2.7/dist-packages/chardet/test.py
|
Python
|
gpl-2.0
| 42
|
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
def bold(msg):
return BOLD + BLUE + msg + END
def warning(msg):
return YELLOW + msg + END
def fail():
return "[" + RED + "Fail" + END + "]"
def done():
return "[" + GREEN + "Done" + END + "]"
def error(msg):
return RED + msg + END
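# Usage sketch (hypothetical messages; assumes an ANSI-capable terminal):
def _demo():
    print(bold("Installing chorus... ") + done())
    print(warning("low disk space"))
    print(error("unreachable host") + " " + fail())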
|
jamesblunt/chorus
|
packaging/setup/color.py
|
Python
|
apache-2.0
| 444
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Analytic Accounting',
'version': '1.1',
'author' : 'OpenERP SA',
'website' : 'http://www.openerp.com',
'category': 'Hidden/Dependency',
'depends' : ['base', 'decimal_precision', 'mail'],
'description': """
Module for defining analytic accounting object.
===============================================
In OpenERP, analytic accounts are linked to general accounts but are treated
totally independently. So, you can enter various different analytic operations
that have no counterpart in the general financial accounts.
""",
'data': [
'security/analytic_security.xml',
'security/ir.model.access.csv',
'analytic_sequence.xml',
'analytic_view.xml',
'analytic_data.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
aricchen/openHR
|
openerp/addons/analytic/__openerp__.py
|
Python
|
agpl-3.0
| 1,877
|
"""distutils.command.install
Implements the Distutils 'install' command."""
from distutils import log
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: install.py 62788 2008-05-06 22:41:46Z christian.heimes $"
import sys, os, string
from types import *
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
if sys.version < "2.2":
WINDOWS_SCHEME = {
'purelib': '$base',
'platlib': '$base',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
else:
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/lib/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/lib/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
'nt': WINDOWS_SCHEME,
'nt_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Scripts',
'data' : '$userbase',
},
'mac': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'mac_user': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/$py_version_short/include/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
'os2': {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
'os2_home': {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/include/python$py_version_short/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
},
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
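# Worked example (a sketch, not part of distutils itself): once the base
# directories and config vars are known, each template above is expanded with
# distutils.util.subst_vars, e.g. for a unix_prefix install:
#
#   subst_vars('$base/lib/python$py_version_short/site-packages',
#              {'base': '/usr/local', 'py_version_short': '2.6'})
#   # -> '/usr/local/lib/python2.6/site-packages'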
class install (Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
('user', None,
"install in user site-package '%s'" % USER_SITE),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build', 'user']
negative_opt = {'no-compile' : 'compile'}
def initialize_options (self):
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options (self):
# This method (and its pliant slaves, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
        # distribution are determined.  Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError, \
("must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError, \
"must supply either home or prefix/exec-prefix -- not both"
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with with prefix/"
"exec_prefix/home or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = (string.split(sys.version))[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'userbase': self.install_userbase,
'usersite': self.install_usersite,
}
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print "config vars:"
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
# finalize_options ()
def dump_dirs (self, msg):
if DEBUG:
from distutils.fancy_getopt import longopt_xlate
print msg + ":"
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = string.translate(self.negative_opt[opt_name],
longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = string.translate(opt_name, longopt_xlate)
val = getattr(self, opt_name)
print " %s: %s" % (opt_name, val)
def finalize_unix (self):
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError, \
("install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError, \
"must not supply exec-prefix without prefix"
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
# finalize_unix ()
def finalize_other (self): # Windows and Mac OS for now
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError, \
"I don't know how to install stuff on '%s'" % os.name
# finalize_other ()
def select_scheme (self, name):
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs (self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs (self):
self._expand_attrs(['install_base',
'install_platbase',
'root'])
def expand_dirs (self):
self._expand_attrs(['install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',])
def convert_paths (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path (self):
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
if type(self.extra_path) is StringType:
self.extra_path = string.split(self.extra_path, ',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
(path_file, extra_dirs) = self.extra_path
else:
raise DistutilsOptionError, \
("'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
# handle_extra_path ()
def change_roots (self, *names):
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~
"""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.iteritems():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0700)
# -- Command execution methods -------------------------------------
def run (self):
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in xrange(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
# run ()
def create_path_file (self):
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs (self):
# Assemble the outputs of all the sub-commands.
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs (self):
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib (self):
"""Return true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers (self):
return self.distribution.has_headers()
def has_scripts (self):
return self.distribution.has_scripts()
def has_data (self):
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
# class install
|
leighpauls/k2cro4
|
third_party/python_26/Lib/distutils/command/install.py
|
Python
|
bsd-3-clause
| 26,777
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
try:
from azure_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
# Change PACKAGE_NAME to rename both the package folder and the distribution name
PACKAGE_NAME = "azure-mgmt-compute"
PACKAGE_PPRINT_NAME = "Compute Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
with open('HISTORY.rst', encoding='utf-8') as f:
history = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + history,
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=["tests"]),
install_requires=[
'msrestazure>=0.4.20,<2.0.0',
'azure-common~=1.1',
],
cmdclass=cmdclass
)
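# Illustration (made-up file body, not part of the original setup script): the
# VERSION regex above matches assignments such as
#     VERSION = "4.0.1"
# in azure/mgmt/compute/version.py, capturing '4.0.1' as group(1).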
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-compute/setup.py
|
Python
|
mit
| 2,784
|
import time
import os
import glog
from daisy import executeCmds
def start_benchmark(algo, num_of_chains, mbps, chain_index=None, isIperf=False):
""" Allocate E2 style chains. """
# list of commands to execute one-by-one
cmds = []
if isIperf:
glog.info('Launching iperf instead of tcpreplay...')
dirname = "%s%s-iperf" % (algo, str(mbps))
else:
dirname = "%s%s" % (algo, str(mbps))
cmds.append('sudo rm -f ./results/allocation/%s/*.csv' %
(dirname))
executeCmds(cmds)
cmds[:] = []
    # copy the traces into the containers for tcpreplay, this might take a while
if not isIperf:
glog.info('Copying traces into the containers...')
if chain_index is None:
for chain_index in range(num_of_chains):
cmds.append('sudo docker cp ../traces/output.pcap mn.chain%d-source:/' % chain_index)
else:
cmds.append('sudo docker cp ../traces/output.pcap mn.chain%d-source:/' % chain_index)
executeCmds(cmds)
cmds[:] = []
    # launch dstat on every sink to record received traffic (plus iperf3 servers if needed)
glog.info('Running dstat...')
if chain_index is None:
for chain_index in range(num_of_chains):
cmds.append('sudo docker exec -d mn.chain%d-sink dstat --net --time -N intf2 --bits --output /tmp/dstat.csv' % chain_index)
if isIperf:
cmds.append('sudo docker exec mn.chain%d-sink iperf3 -s' % chain_index)
else:
cmds.append('sudo docker exec -d mn.chain%d-sink dstat --net --time -N intf2 --bits --output /tmp/dstat.csv' % chain_index)
if isIperf:
cmds.append('sudo docker exec -d mn.chain%d-sink iperf3 -s' % chain_index)
executeCmds(cmds)
cmds[:] = []
print('>>> wait 2s for dstats to initialize')
time.sleep(2)
print('<<< wait complete.')
if chain_index is None:
for chain_index in range(num_of_chains):
# each loop is around 1s for 10 Mbps speed, 100 loops easily make 1m
if isIperf:
cmds.append('sudo docker exec -d mn.chain%d-source iperf3 --zerocopy -b %dm -c 10.0.10.10' %
(chain_index, mbps))
else:
cmds.append('sudo docker exec -d mn.chain%d-source tcpreplay --loop=0 --mbps=%d -d 1 --intf1=intf1 output.pcap' %
(chain_index, mbps))
else:
# each loop is around 1s for 10 Mbps speed, 100 loops easily make 1m
if isIperf:
cmds.append('sudo docker exec -d mn.chain%d-source iperf3 --zerocopy -t 86400 -b %dm -c 10.0.10.10' %
(chain_index, mbps))
else:
cmds.append('sudo docker exec -d mn.chain%d-source tcpreplay --loop=0 --mbps=%d -d 1 --intf1=intf1 output.pcap' %
(chain_index, mbps))
executeCmds(cmds)
cmds[:] = []
def finish_benchmark(algo, num_of_chains, mbps, isIperf=False):
# list of commands to execute one-by-one
cmds = []
if isIperf:
dirname = "%s%s-iperf" % (algo, str(mbps))
else:
dirname = "%s%s" % (algo, str(mbps))
# kill existing tcpreplay and dstat
# for chain_index in range(num_of_chains):
# cmds.append(
# 'sudo docker exec mn.chain%d-source pkill tcpreplay' % chain_index)
# cmds.append(
# 'sudo docker exec mn.chain%d-sink pkill python2' % chain_index)
cmds.append("sudo killall tcpreplay")
cmds.append("sudo killall python2")
cmds.append("sudo killall iperf3")
executeCmds(cmds)
cmds[:] = []
print('>>> wait 10s for dstats to terminate')
time.sleep(10)
print('<<< wait complete.')
# create the target folder if it does not exist
    result_dir = 'results/iter-allocation/%s' % dirname
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
# copy .csv results from VNF to the host
for chain_index in range(num_of_chains):
cmds.append('sudo docker cp mn.chain%s-sink:/tmp/dstat.csv ./results/iter-allocation/%s/e2-allocate-from-chain%s-sink.csv' %
(str(chain_index), dirname, str(chain_index)))
executeCmds(cmds)
cmds[:] = []
# remove dstat output files
for chain_index in range(num_of_chains):
cmds.append('sudo docker exec mn.chain%d-sink rm /tmp/dstat.csv' % chain_index)
executeCmds(cmds)
cmds[:] = []
print('done')
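# Usage sketch (hypothetical algorithm name and values): a typical measurement
# cycle drives the two entry points above back to back, e.g.
#
#   start_benchmark('daisy', num_of_chains=4, mbps=10)
#   time.sleep(60)                    # let the traffic run for a minute
#   finish_benchmark('daisy', 4, 10)  # collect dstat CSVs and clean up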
|
knodir/son-emu
|
scenarios/experiments/bench.py
|
Python
|
apache-2.0
| 4,445
|
## __BEGIN_LICENSE__
## Copyright (C) 2006-2010 United States Government as represented by
## the Administrator of the National Aeronautics and Space Administration
## All Rights Reserved.
## __END_LICENSE__
import layermanager_client as lmc
import inspect
_cms_client = None
def get_default_client():
from config import cms_connection_options
global _cms_client
if not _cms_client:
_cms_client = lmc.LayersManagerClient(*cms_connection_options)
return _cms_client
class MissingPropertyError(Exception): pass
def class_for_kind(kind):
result = None
classes = [c for c in globals().values() if inspect.isclass(c)]
for klass in classes:
if issubclass(klass, CmsObject) and klass.kind == kind:
if result is not None:
raise Exception("More than one class matches kind '%s'" % kind)
result = klass
if not result:
raise Exception("class not found for kind '%s'" % kind)
return result
class CmsObject(object):
"""
This is the base class for the object oriented interface to the Layers CMS.
It manages the business of creating objects and managing properties.
It will lazily load object propeties only when nessecary.
To update objects, set the property values and call the save() method.
"""
kind = None # Override this in subclasses
@classmethod
def fetch_or_create(klass, kind, layer_id=None, *args, **kwargs):
"""
        If records exist on the CMS server that match the given keywords, fetch_or_create()
will return a list of all matching objects (as instances of the appropriate CmsObject subclass).
Otherwise, it will create a new object, save it to the server, and return a single-element
list containing the new object.
"""
def stringify_keys(dictionary):
return dict((str(k), v) for (k,v) in dictionary.items())
cms = get_default_client()
if kind == 'layer':
kwargs['nocontents'] = True
if layer_id:
kwargs['id'] = layer_id
else:
assert layer_id is not None
if 'id' in kwargs:
# Query and return [] wrapped result
return [cms.Query(kind, layer_id, kwargs['id'])]
else:
# List and iterate
if kind == 'layer':
ids = cms.List(kind, **kwargs)
else:
ids = cms.List(kind, layer_id, **kwargs)
results = []
for id in ids:
if kind == 'layer':
layer_id = id
properties = cms.Query(kind, layer_id, id)
                # keep this record only if every queried property matches
                if all(k == 'nocontents' or properties.get(k) == v
                       for k, v in kwargs.items()):
                    results.append(properties)
if len(results) > 0:
results = [class_for_kind(kind)(layer_id, **stringify_keys(props)) for props in results]
return results
else:
# create!
new_cms_obj = class_for_kind(kind)(layer_id, **kwargs)
new_cms_obj.save()
return [new_cms_obj]
@classmethod
def get_first_by_name(klass, name, layer_id=None, cms=get_default_client()):
kind = klass.kind
if not layer_id and kind != 'layer':
raise Exception("layer id required")
print "Fetching existing layer %s..." % name,
ids = cms.List(kind)
i = 0
for item_id in ids:
item = cms.Query(kind, item_id, layer_id, nocontents='true')
if item['name'] == name:
print "Done."
return item_id
else:
raise Exception('%s "%s" not found.' % (kind, name))
def __init__(self, layer_id=0, **kwargs):
kind = self.kind
self._property_names = lmc.KNOWN_CMS_ARGUMENTS[kind]
self._required_properties = lmc.REQUIRED_CMS_ARGUMENTS[kind]
self.cms = kwargs.get('cms', None) or get_default_client()
self._kind = kind
self.id = kwargs.get('id', None)
self.layer_id = layer_id
self._properties_loaded = False
self._properties_updated = False
self._properties = {}
for k, v in kwargs.items():
self.__setattr__(k, v)
def _load_properties(self):
assert not self.is_unsaved # This shouldn't be called on a new, unsaved object
tmp_new_properties = self._properties
properties = self.cms.Query(self._kind, self.layer_id, self.id)
self._properties = dict((str(k), v) for k, v in properties.items()) # convert keys from unicode to string (so they can be kwargs)
assert type(self._properties) == dict
self._properties.update(tmp_new_properties)
self._properties_loaded = True
@property
def is_unsaved(self):
return not self.id
def __getattr__(self, name):
if name in self._property_names:
if name not in self._properties and not self._properties_loaded:
if self.is_unsaved: # New, unsaved object. No properties to retrieve.
return None
else:
self._load_properties()
return self._properties[name]
else:
raise AttributeError("No property: %s" % name)
def __setattr__(self, name, value):
if name == '_property_names':
self.__dict__[name] = value
if name in self._property_names:
self._properties[name] = value
self._properties_updated = True
else:
self.__dict__[name] = value
def save(self):
for prop in self._required_properties:
if prop not in self._properties or not self._properties[prop]:
raise MissingPropertyError('%s object missing required property "%s".' % (self.__class__.__name__, prop))
if self.is_unsaved:
self.id = self.cms.Create(self._kind, self.layer_id, **self._properties)
else:
if not self._properties_updated:
return None #nothing to save
self.cms.Update(self._kind, self.layer_id, **self._properties)
self._properties_updated = False
return True
class Layer(CmsObject):
# TODO: Add icon upload functionality... (call FetchAndUpload)
kind = 'layer'
def __init__(self, id=0, **kwargs):
CmsObject.__init__(self, layer_id=id, **kwargs)
def save(self):
is_new_layer = self.is_unsaved
CmsObject.save(self)
if is_new_layer:
self.layer_id = self.id # is this actually necessary? (yes)
class Entity(CmsObject):
kind = 'entity'
def save(self):
self.properties = self.cms._StandardizeEntity(self.layer_id, self.properties)
CmsObject.save(self)
class Schema(CmsObject):
kind = 'schema'
class Field(CmsObject):
kind = 'field'
class Style(CmsObject):
kind = 'style'
class Link(CmsObject):
kind = 'link'
class Region(CmsObject):
kind = 'region'
class Folder(CmsObject):
kind = 'folder'
def batch_create_entities(layer, entities, retries=1, cms=get_default_client()):
assert type(layer) in (int, Layer)
if isinstance(layer, Layer):
layer_id = layer.id
else:
layer_id = layer
return cms.BatchCreateEntities(layer_id, [e.properties for e in entities], retries=retries)
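# Usage sketch (hypothetical names; these are network calls, so not executed
# here):
#
#   layer = CmsObject.fetch_or_create('layer', name='My Layer')[0]
#   folder = CmsObject.fetch_or_create('folder', layer.id, name='POIs')[0]
#   folder.name = 'Points of Interest'
#   folder.save()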
|
deleted/kml-layer-manager
|
client/layers.py
|
Python
|
apache-2.0
| 7,490
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 22:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0006_auto_20170302_2255'),
]
operations = [
migrations.AlterField(
model_name='rastrexo',
name='kind',
field=models.TextField(choices=[('FRECHAS', 'frechas'), ('BRUXULA', 'brúxula'), ('MIXTO', 'mixto'), ('ANDAINA', 'andaina'), ('SE', 'sen especificar')], default='SE', verbose_name='tipo'),
),
]
|
rastrexando-eu/rastrexando-eu
|
core/migrations/0007_auto_20170306_2211.py
|
Python
|
gpl-3.0
| 598
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import babel.dates
import email.utils
import pretend
from warehouse.i18n import filters
def test_format_date(monkeypatch):
formatted = pretend.stub()
format_date = pretend.call_recorder(lambda *a, **kw: formatted)
monkeypatch.setattr(babel.dates, "format_date", format_date)
request = pretend.stub(locale=pretend.stub())
ctx = pretend.stub(get=pretend.call_recorder(lambda k: request))
args = [pretend.stub(), pretend.stub()]
kwargs = {"foo": pretend.stub()}
assert filters.format_date(ctx, *args, **kwargs) is formatted
kwargs.update({"locale": request.locale})
assert format_date.calls == [pretend.call(*args, **kwargs)]
def test_format_datetime(monkeypatch):
formatted = pretend.stub()
format_datetime = pretend.call_recorder(lambda *a, **kw: formatted)
monkeypatch.setattr(babel.dates, "format_datetime", format_datetime)
request = pretend.stub(locale=pretend.stub())
ctx = pretend.stub(get=pretend.call_recorder(lambda k: request))
args = [pretend.stub(), pretend.stub()]
kwargs = {"foo": pretend.stub()}
assert filters.format_datetime(ctx, *args, **kwargs) is formatted
kwargs.update({"locale": request.locale})
assert format_datetime.calls == [pretend.call(*args, **kwargs)]
def test_format_rfc822_datetime(monkeypatch):
formatted = pretend.stub()
formatdate = pretend.call_recorder(lambda *a, **kw: formatted)
monkeypatch.setattr(email.utils, "formatdate", formatdate)
ctx = pretend.stub()
timestamp = pretend.stub()
args = [pretend.stub(timestamp=lambda: timestamp), pretend.stub()]
kwargs = {"foo": pretend.stub()}
assert filters.format_rfc822_datetime(ctx, *args, **kwargs) is formatted
assert formatdate.calls == [pretend.call(timestamp, usegmt=True)]
|
karan/warehouse
|
tests/unit/i18n/test_filters.py
|
Python
|
apache-2.0
| 2,338
|
#!/usr/bin/env python
import math
import sys
from numpy import float32, putmask, shape, zeros
# This is the average of all species in the alignment outside of exons
# > mean(r)
# A T C G
# 0.2863776 0.2878264 0.2129560 0.2128400
# > sd(r)
# A T C G
# 0.01316192 0.01371148 0.01293836 0.01386655
ENCODE_NONCODING_BACKGROUND = {'A': 0.2863776, 'T': 0.2878264, 'G': 0.2128400, 'C': 0.2129560}
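# Sanity check (added for illustration): the background frequencies above are
# probabilities and should sum to 1.
assert abs(sum(ENCODE_NONCODING_BACKGROUND.values()) - 1.0) < 1e-9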
class Align:
def __init__(self, seqrows, headers=None):
self.rows = seqrows
self.nrows = len(seqrows)
ncol = None
for rownum, row in enumerate(self.rows):
try:
if ncol is None:
ncol = len(row)
elif ncol != len(row):
raise ValueError("Align: __init__:alignment block:row %d does not have %d columns, it has %d" % (rownum, ncol, len(row)))
            except Exception:
                print(row, file=sys.stderr)
                raise
self.ncols = ncol
self.dims = (self.nrows, self.ncols)
self.headers = headers
def __str__(self):
return "\n".join(self.rows)
class AlignScoreMatrix:
def __init__(self, align):
nan = float('nan')
matrix = zeros((align.nrows, align.ncols), float32)
# set to nans
for ir in range(len(matrix)):
for ic in range(len(matrix[ir])):
matrix[ir][ic] = nan
self.matrix = matrix
def __len__(self):
return shape(self.matrix)[1]
def __str__(self):
        return str(self.matrix)
def score_align_motif(align, motif, gapmask=None, byPosition=True):
chr, chr_start, chr_stop = align.headers[0]
# a blank score matrix
nrows, ncols = align.dims
ascoremax = AlignScoreMatrix(align)
scoremax = ascoremax.matrix
minSeqLen = len(motif)
for ir in range(nrows):
        # row is missing data
if isnan(align.rows[ir][0]):
continue
for start in range(ncols):
if align.rows[ir][start] == '-':
continue
elif align.rows[ir][start] == 'n':
continue
elif align.rows[ir][start] == 'N':
continue
# get enough sequence for the weight matrix
subseq = ""
end = 0
ic = start
while len(subseq) < minSeqLen:
if ic >= len(align.rows[ir]):
break
char = align.rows[ir][ic].upper()
ic += 1
if char == '-' or char == 'N':
continue
else:
subseq += char
if len(subseq) == minSeqLen:
end = ic+1
for_score = int(match_consensus(subseq, motif))
revseq = reverse_complement(subseq)
rev_score = int(match_consensus(revseq, motif))
score = max(for_score, rev_score)
# dbg
# if ir == 0: print >>sys.stderr, int(chr_start) + start - align.rows[ir].count('-',0,start), subseq, score
# replace the alignment positions with the result
if byPosition:
scoremax[ir][start] = score
else:
# replace positions matching the width of the pwm
for i in range(start, end):
if isnan(scoremax[ir][i]):
scoremax[ir][i] = score
elif score > scoremax[ir][i]:
scoremax[ir][i] = score
# break
# mask gap characters
if gapmask is None:
gapmask = score_align_gaps(align)
putmask(scoremax, gapmask, float('nan'))
return scoremax
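# Illustrative usage: score_align_motif(align, "TATAWR") fills a per-row,
# per-column matrix with 0/1 consensus-match scores (best of forward and
# reverse strand) and masks gap columns to NaN; the motif string here is a
# hypothetical IUPAC consensus, not one used elsewhere in this module.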
# -----------
#
# WeightMatrix--
# A position weight matrix (PWM) representation of a motif.
#
# ----------
# construction arguments:
# id: id (name) of the motif
# rows: the matrix; each row is a hash from symbol to weight, with
# .. the weight in string form
# alphabet: symbols allowed
# background: hash from symbol to background probability of that symbol; if
# .. not specified, ENCODE_NONCODING_BACKGROUND is used
# internal fields:
# rows: the matrix; each row is a hash from symbol to log-odds score
# .. of that symbol for that row of the weight matrix
# counts: the matrix; count[row][sym] is the weight, as an integer
# probs: the matrix; probs[row][sym] is the weight, as a probability
# ----------
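# usage sketch (illustrative values, not taken from this module's callers):
#
#   rows = [['12', '0', '3', '1', 'A'],
#           ['1', '14', '0', '1', 'C']]
#   pwm = PositionWeightMatrix('toy', rows, ['A', 'C', 'G', 'T'])
#   pwm.score_seq('ACGTAC')   # -> list of (raw, scaled) scores per offset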
class PositionWeightMatrix:
complementMap = str.maketrans("ACGTacgt", "TGCAtgca")
# IUPAC-IUB
symbols = {
'A': frozenset(['A']),
'C': frozenset(['C']),
'G': frozenset(['G']),
'T': frozenset(['T']),
'R': frozenset(['A', 'G']),
'Y': frozenset(['C', 'T']),
'M': frozenset(['A', 'C']),
'K': frozenset(['G', 'T']),
'S': frozenset(['G', 'C']),
'W': frozenset(['A', 'T']),
'H': frozenset(['A', 'C', 'T']),
'B': frozenset(['G', 'T', 'C']),
'V': frozenset(['G', 'C', 'A']),
'D': frozenset(['G', 'T', 'A'])}
def __init__(self, id, rows, alphabet, background=None, score_correction=True):
self.id = id
self.alphabet = alphabet
nsymbols = len(self.alphabet)
for i in range(len(self.alphabet)):
self.alphabet[i] = self.alphabet[i].upper()
if background is not None:
self.background = background
else:
self.background = {}
sorted_alphabet = []
sorted_alphabet[:] = self.alphabet[:]
sorted_alphabet.sort()
if ['A', 'C', 'G', 'T'] == sorted_alphabet:
self.background = ENCODE_NONCODING_BACKGROUND
else:
for x in self.alphabet:
self.background[x] = float(1)/len(self.alphabet)
if score_correction:
self.score_correction = self.corrected_probability_score
else:
self.score_correction = self.simple_probability
# partition counts from consensus symbol
        # in order to properly handle scaling in the presence of non-integers,
# we prescan the matrix to figure out the largest scale factor, then go
# back through and scale 'em all (some rows may be integer counts,
# others may be probabilities)
self.consensus = []
scale = 1
for i in range(len(rows)):
# try:
fields, consensus = rows[i][:nsymbols], rows[i][-1]
for x, count in enumerate(fields):
try:
(w, s) = self.parse_weight(count)
except ValueError:
                    raise ValueError("pwm row {} has bad weight {}".format(" ".join(fields), count))
# replace row counts with (values,scale)
rows[i][x] = (w, s)
scale = max(s, scale)
# except:
# print >>sys.stderr,rows
# raise ValueError
# raise ValueError, "pwm row %s has wrong field count" % " ".join(fields)
self.consensus.append(consensus)
hashRows = []
self.matrix_base_counts = {} # for pseudocounts
self.counts = [] # for scaled counts
self.probs = [] # for probabilities
# scale counts to integers
for i in range(len(rows)):
hashRows.append(dict())
for x, sym in enumerate(alphabet):
(w, s) = rows[i][x]
hashRows[i][sym] = w * scale/s
assert hashRows[i][sym] >= 0
if sym not in self.matrix_base_counts:
self.matrix_base_counts[sym] = 0
self.matrix_base_counts[sym] += hashRows[i][sym]
self.counts.append(hashRows[i].copy())
self.probs.append(hashRows[i].copy())
totalWeight = float(sum(self.probs[i].values()))
for sym in self.probs[i]:
self.probs[i][sym] /= totalWeight
self.sites = sum(hashRows[0].values())
# scan pwm to pre-compute logs of probabilities and min and max log-odds
# scores (over the whole PWM) for scaling; note that the same min and max
        # applies for scaling log-odds scores for quantum comparisons
self.information_content = []
minSum = 0
maxSum = 0
for i in range(len(hashRows)):
self.information_content.append(self.information_content_calculation(i, hashRows))
newHashRow = {}
for base in self.alphabet:
newHashRow[base] = self.pwm_score(base, i, hashRows)
hashRows[i] = newHashRow
minSum += min(hashRows[i].values())
maxSum += max(hashRows[i].values())
self.minSum = minSum
self.maxSum = maxSum
self.rows = hashRows
# Reference 1: Wasserman and Sandelin: Nat Rev Genet. 2004 Apr;5(4):276-87.
# Reference 2: Gertz et al.: Genome Res. 2005 Aug;15(8):1145-52.
def information_content_calculation(self, i, counts):
# Reference 1)
return 2 + sum([self.information_base_content(base, i, counts) for base in self.alphabet])
# Reference 2)
# return sum( [ self.information_base_content(base,i,counts) for base in self.alphabet ] )
def information_base_content(self, base, i, counts):
# Reference 1)
# return self.score_correction(counts,base,i) * math.log ( self.score_correction(counts,base,i), 2)
# Reference 2)
return self.score_correction(counts, base, i) * self.pwm_score(base, i, counts)
def __call__(self, seq):
return self.score_seq(seq)
def __add__(self, other):
assert self.alphabet == other.alphabet
r, (p, q) = self.max_correlation(other)
if p == q == 0:
width = max(len(self), len(other))
elif p > 0:
width = max(len(other)+p, len(self))
elif q > 0:
width = max(len(self)+q, len(other))
sumx = zeros((width, len(self.alphabet)), dtype='int')
selfx = self.to_count_matrix()
otherx = other.to_count_matrix()
if p == q == 0:
sumx[:len(self)] += selfx
sumx[:len(other)] += otherx
elif p > 0:
sumx[p:p+len(other)] += otherx
sumx[:len(self)] += selfx
else:
sumx[:len(other)] += otherx
sumx[q:q+len(self)] += selfx
newRows = []
for x in sumx:
y = list(x)
y.append(consensus_symbol(y))
y = [str(yi) for yi in y]
newRows.append(y)
return PositionWeightMatrix(self.id+other.id, newRows, self.alphabet, self.background)
def __old_add__(self, other, maxp=None):
assert self.alphabet == other.alphabet
bigN = max(len(self), len(other))
smallN = min(len(self), len(other))
if not maxp:
prsq = self.correlation(other)
maxp = prsq.index(max(prsq))
leftpad = ' ' * maxp
rightsize = bigN - smallN
rightpad = ' ' * rightsize
leftStrings = []
rightStrings = []
if len(self) > len(other):
larger = self
smaller = other
leftStrings = self.consensus
rightStrings = list(leftpad) + other.consensus + list(rightpad)
else:
smaller = self
larger = other
leftStrings = list(leftpad) + self.consensus + list(rightpad)
rightStrings = other.consensus
sumx = zeros([bigN, len(self.alphabet)])
sumx += larger.to_count_matrix()
sumx[maxp:maxp+smallN] += smaller.to_count_matrix()
newRows = []
for i, x in enumerate(sumx):
y = list(x)
y.append(leftStrings[i] + rightStrings[i])
y = [str(yi) for yi in y]
newRows.append(y)
# return PositionWeightMatrix(self.id+other.id,newRows[maxp:maxp+smallN],self.alphabet,self.background)
return PositionWeightMatrix(self.id+other.id, newRows, self.alphabet, self.background)
def to_matrix(self):
m = zeros([len(self), len(self.alphabet)])
for i in range(len(self)):
for j, a in enumerate(self.alphabet):
m[i][j] = self[i][a]
return m
def to_count_matrix(self):
m = zeros([len(self), len(self.alphabet)], dtype='int')
for i in range(len(self)):
for j, a in enumerate(self.alphabet):
m[i][j] = self.counts[i][a]
return m
def max_correlation(self, otherwmx):
rsq, ixtuple = self.slide_correlation(otherwmx)
max_rsq = max(rsq)
maxp, maxq = ixtuple[rsq.index(max_rsq)]
return max_rsq, (maxp, maxq)
def slide_correlation(self, other):
assert self.alphabet == other.alphabet
selfx = self.to_count_matrix()
otherx = other.to_count_matrix()
rsq = []
ixtuple = []
# self staggered over other, scan self backwards until flush
for q in range(len(other)-1, -1, -1):
r = 0
n = 0
for p in range(len(self)):
if q+p < len(other):
r += rsquared(list(selfx[p]), list(otherx[q+p]))
n += 1
else:
n += 1
rsq.append(r/n)
ixtuple.append((0, q))
        # other staggered below self, scan other forward
for p in range(1, len(self)):
r = 0
n = 0
for q in range(len(other)):
if p+q < len(self):
r += rsquared(list(selfx[p+q]), list(otherx[q]))
n += 1
else:
n += 1
rsq.append(r/n)
ixtuple.append((p, 0))
return rsq, ixtuple
def correlation(self, otherwmx):
assert self.alphabet == otherwmx.alphabet
if len(self) > len(otherwmx):
larger = self.to_count_matrix()
smaller = otherwmx.to_count_matrix()
else:
smaller = self.to_count_matrix()
larger = otherwmx.to_count_matrix()
bigN = len(larger)
smallN = len(smaller)
position_rsq = []
# slide small over large, for ave rsq
for p in range(bigN):
if p+smallN <= bigN:
r = 0
for q in range(smallN):
r += rsquared(list(smaller[q]), list(larger[p+q]))
position_rsq.append(r / smallN)
return position_rsq
def score_align(self, align, gapmask=None, byPosition=True):
# a blank score matrix
nrows, ncols = align.dims
ascoremax = AlignScoreMatrix(align)
scoremax = ascoremax.matrix
minSeqLen = len(self)
for ir in range(nrows):
# row is missing data
if isnan(align.rows[ir][0]):
continue
for start in range(ncols):
if align.rows[ir][start] == '-':
continue
elif align.rows[ir][start] == 'n':
continue
elif align.rows[ir][start] == 'N':
continue
# get enough sequence for the weight matrix
subseq = ""
end = 0
for ic in range(start, ncols):
char = align.rows[ir][ic]
if char == '-' or char == 'N':
continue
else:
subseq += char
if len(subseq) == minSeqLen:
end = ic+1
# forward
scores = self.score_seq(subseq)
raw, forward_score = scores[0]
# reverse
scores = self.score_reverse_seq(subseq)
raw, reverse_score = scores[0]
score = max(forward_score, reverse_score)
# replace the alignment positions with the result
if byPosition:
scoremax[ir][start] = score
else:
# replace positions matching the width of the pwm
for i in range(start, end):
if isnan(scoremax[ir][i]):
scoremax[ir][i] = score
elif score > scoremax[ir][i]:
scoremax[ir][i] = score
# mask gap characters
if gapmask is None:
gapmask = score_align_gaps(align)
putmask(scoremax, gapmask, float('nan'))
return scoremax
# seq can be a string, a list of characters, or a quantum sequence (a list
# of hashes from symbols to probability)
def score_seq(self, seq):
if isinstance(seq[0], dict):
return self.score_quantum_seq(seq)
scores = []
for start in range(len(seq)):
if start + len(self) > len(seq):
break
subseq = seq[start:start+len(self)]
raw = 0
try:
for i, nt in enumerate(subseq):
raw += self.rows[i][nt.upper()]
scaled = self.scaled(raw)
except KeyError:
raw, scaled = float('nan'), float('nan')
scores.append((raw, scaled))
return scores
def score_quantum_seq(self, seq):
scores = []
for start in range(len(seq)):
if start + len(self) > len(seq):
break
subseq = seq[start:start+len(self)]
raw = 0
try:
for i, nt in enumerate(subseq):
numer = sum([subseq[i][nt] * self.probs[i][nt] for nt in subseq[i]])
denom = sum([subseq[i][nt] * self.background[nt] for nt in subseq[i]])
raw += math.log(numer/denom, 2)
scaled = self.scaled(raw)
except KeyError:
raw, scaled = float('nan'), float('nan')
except OverflowError:
raw, scaled = float('nan'), float('nan')
except ValueError:
raw, scaled = float('nan'), float('nan')
scores.append((raw, scaled))
return scores
def score_reverse_seq(self, seq):
revSeq = reverse_complement(seq)
scores = self.score_seq(revSeq)
scores.reverse()
return scores
def scaled(self, val):
return (val - self.minSum) / (self.maxSum - self.minSum)
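    # Illustrative: with minSum = -8 and maxSum = 12 (hypothetical bounds),
    # a raw log-odds score of 2 scales to (2 - -8) / (12 - -8) = 0.5.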
def pseudocount(self, base=None):
def f(count):
return math.sqrt(count + 1)
if base in self.alphabet:
return f(self.matrix_base_counts[base])
elif base is None:
return f(self.sites)
else:
return float("nan")
def simple_probability(self, freq, base, i):
# p(base,i) = f(base,i)
# ----------------------
# sum(f(base,{A,C,G,T}))
return float(freq[i][base]) / sum([freq[i][nt] for nt in self.alphabet])
def corrected_probability_score(self, freq, base, i):
# p(base,i) = f(base,i) + s(base)
# --------------------
# N + sum(s(A,C,T,G))
f = float(freq[i][base])
s = self.pseudocount(base)
N = self.sites
# print >>sys.stderr, "f:%.3f + s:%.3f = %.3f" % (f,s,f + s)
# print >>sys.stderr, "-------------------------"
# print >>sys.stderr, "N:%d + %d = %d" % (N,self.pseudocount(), N + self.pseudocount())
# print >>sys.stderr, "\t\t %.3f\n" % ((f + s) / (N + self.pseudocount()))
assert (f + s) > 0
return (f + s) / (N + self.pseudocount())
def pwm_score(self, base, i, freq, background=None):
if background is None:
background = self.background
p = self.score_correction(freq, base, i)
# print >>sys.stderr, p
# print >>sys.stderr, "k %d %c" % (i,base),freq[i][base]
b = background[base]
try:
return math.log(p/b, 2)
except OverflowError:
# print >>sys.stderr,"base=%c, math.log(%.3f / %.3f)" % (base,p,b)
# print >>sys.stderr,self.id
return float('nan')
except ValueError:
# print >>sys.stderr,"base=%c, math.log(%.3f / %.3f)" % (base,p,b)
# print >>sys.stderr,self.id
return float('nan')
def parse_weight(self, weightString):
fields = weightString.split(".")
if len(fields) > 2:
            raise ValueError("bad weight string: %r" % weightString)
w = int(fields[0])
s = 1
if len(fields) == 2:
for _ in range(0, len(fields[1])):
s *= 10
w = s*w + int(fields[1])
return (w, s) # w = the weight
# s = the scale used (a power of 10)
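    # Worked example (illustrative): parse_weight("0.25") returns (25, 100)
    # and parse_weight("17") returns (17, 1), so every weight becomes an
    # integer numerator plus a power-of-ten scale.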
def __str__(self):
lines = [self.id]
headers = ["%s" % nt for nt in self.alphabet]
lines.append("P0\t" + "\t".join(headers))
for ix in range(0, len(self.rows)):
weights = ["%d" % self.counts[ix][nt] for nt in self.alphabet]
# lines.append(("%02d\t" % ix) + "\t".join(weights) + "\t" + self.consensus[ix])
lines.append(("%02d\t" % ix) + "\t".join(weights) + "\t" + str(sum(self.counts[ix].values())) + "\t" + self.consensus[ix])
return "\n".join(lines)
def __getitem__(self, key):
return self.rows[key]
def __setitem__(self, key, value):
self.rows[key] = value
def __len__(self):
return len(self.rows)
def score_align_gaps(align):
# a blank score matrix
nrows, ncols = align.dims
scoremax = AlignScoreMatrix(align).matrix
for ir in range(nrows):
# row is missing data
if isnan(align.rows[ir][0]):
continue
# scan for gaps
for pos in range(ncols):
if align.rows[ir][pos] == '-':
scoremax[ir][pos] = 1
else:
scoremax[ir][pos] = 0
return scoremax
# -----------
#
# WeightMatrix Reader--
# Read position weight matrices (PWM) from a file.
#
# -----------
class Reader:
"""Iterate over all interesting weight matrices in a file"""
def __init__(self, file, tfIds=None, name=None, format='basic', background=None, score_correction=True):
self.tfIds = tfIds
self.file = file
self.name = name
self.lineNumber = 0
self.format = format
self.background = background
self.score_correction = score_correction
def close(self):
self.file.close()
def where(self):
if self.name is None:
return "line %d" % self.lineNumber
else:
return "line %d in %s" % (self.lineNumber, self.name)
def __iter__(self):
if self.format == 'basic':
return self.read_as_basic()
elif self.format == 'transfac':
return self.read_as_transfac()
else:
raise ValueError("unknown weight matrix file format: '%s'" % self.format)
def read_as_basic(self):
tfId = None
pwmRows = None
alphabet = ['A', 'C', 'G', 'T']
while (True):
line = self.file.readline()
if not line:
break
line = line.strip()
self.lineNumber += 1
if line.startswith(">"):
if pwmRows is not None:
                    yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction)
# try:
# yield PositionWeightMatrix(tfId,pwmRows,alphabet)
# except:
# print >>sys.stderr, "Failed to read", tfId
tfId = line.strip()[1:]
pwmRows = []
            elif line and line[0].isdigit():
tokens = line.strip().split()
tokens.append(consensus_symbol(line))
# print >>sys.stderr,[ "%.2f" % (float(v)/sum(vals)) for v in vals], tokens[-1]
pwmRows.append(tokens)
if pwmRows is not None: # we've finished collecting a desired matrix
yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction)
def read_as_transfac(self):
self.tfToPwm = {}
tfId = None
pwmRows = None
while True:
line = self.file.readline()
if not line:
break
line = line.strip()
self.lineNumber += 1
# handle an ID line
if line.startswith("ID"):
if pwmRows is not None: # we've finished collecting a desired matrix
try:
# FIXME: alphabet is undefined here!
yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction) # noqa: F821
except Exception:
print("Failed to read", tfId, file=sys.stderr)
tfId = None
pwmRows = None
tokens = line.split(None, 2)
if len(tokens) != 2:
raise ValueError("bad line, need two fields (%s)" % self.where())
tfId = tokens[1]
if self.tfIds is not None and (tfId not in self.tfIds):
continue # ignore it, this isn't a desired matrix
if tfId in self.tfToPwm:
raise ValueError(f"transcription factor {tfId} appears twice ({self.where()})")
pwmRows = [] # start collecting a desired matrix
continue
# if we're not collecting, skip this line
if pwmRows is None:
continue
if len(line) < 1:
continue
# name, if present, added to ID
if line.startswith('NA'):
words = line.strip().split()
tfId = tfId + "\t" + " ".join(words[1:])
# handle a P0 line
if line.startswith("P0"):
alphabet = line.split()[1:]
if len(alphabet) < 2:
raise ValueError("bad line, need more dna (%s)" % self.where())
continue
# handle a 01,02,etc. line
if line[0].isdigit():
tokens = line.split()
try:
index = int(tokens[0])
if index != len(pwmRows)+1:
raise ValueError
except Exception:
raise ValueError("bad line, bad index (%s)" % self.where())
pwmRows.append(tokens[1:])
continue
# skip low quality entries
if line.startswith("CC TRANSFAC Sites of quality"):
print(line.strip(), tfId, file=sys.stderr)
pwmRows = None
continue
if pwmRows is not None: # we've finished collecting a desired matrix
yield PositionWeightMatrix(tfId, pwmRows, alphabet, background=self.background, score_correction=self.score_correction)
# clean up
self.tfToPwm = None
def isnan(x):
# return ieeespecial.isnan(x)
if x == x:
return False
return True
def reverse_complement(nukes):
return nukes[::-1].translate(PositionWeightMatrix.complementMap)
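# Illustrative: reverse_complement("AACG") == "CGTT" (complement TTGC, reversed).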
def rsquared(x, y):
try:
return sum_of_squares(x, y)**2 / (sum_of_squares(x) * sum_of_squares(y))
except ZeroDivisionError:
# return float('nan')
return 0
def sum_of_squares(x, y=None):
    if y is None:
y = x
xmean = float(sum(x)) / len(x)
ymean = float(sum(y)) / len(y)
assert len(x) == len(y)
return sum([float(xi)*float(yi) for xi, yi in zip(x, y)]) - len(x)*xmean*ymean
def consensus_symbol(pattern):
if isinstance(pattern, str):
try:
pattern = [int(x) for x in pattern.split()]
except ValueError as e:
print(pattern, file=sys.stderr)
raise ValueError(e)
# IUPAC-IUB nomenclature for wobblers
wobblers = {
'R': frozenset(['A', 'G']),
'Y': frozenset(['C', 'T']),
'M': frozenset(['A', 'C']),
'K': frozenset(['G', 'T']),
'S': frozenset(['G', 'C']),
'W': frozenset(['A', 'T']),
'H': frozenset(['A', 'C', 'T']),
'B': frozenset(['G', 'T', 'C']),
'V': frozenset(['G', 'C', 'A']),
'D': frozenset(['G', 'T', 'A'])}
symbols = ['A', 'C', 'G', 'T']
if isinstance(pattern, dict):
pattern = [pattern[u] for u in symbols]
total = sum(pattern)
f = [(space/1e5)+(float(x)/total) for space, x in enumerate(pattern)]
copy = []
copy[:] = f[:]
copy.sort()
# http://www.genomatix.de/online_help/help_matinspector/matrix_help.html --
# url says consensus must be greater than 50%, and at least twice the freq
# of the second-most frequent. A double-degenerate symbol can be used
# if the top two account for 75% or more of the nt, if each is less than 50%
# Otherwise, N is used in the consensus.
tops = copy[-2:]
if tops[1] > 0.5 and tops[1] >= 2 * tops[0]:
return symbols[f.index(tops[1])]
elif tops[0] < 0.5 and sum(tops) >= 0.75:
degen = frozenset([symbols[f.index(v)] for v in tops])
for degenSymbol, wobbles in wobblers.items():
# print >>sys.stderr,wobbles
if degen == wobbles:
return degenSymbol
else:
return 'N'
print(pattern, file=sys.stderr)
raise Exception('?')
# import C extensions
try:
from ._position_weight_matrix import c_match_consensus
def match_consensus(sequence, pattern):
return c_match_consensus(sequence, pattern, len(sequence))
# print >>sys.stderr, "C match_consensus used"
except ImportError:
# print >>sys.stderr, "python match_consensus used"
    def match_consensus(sequence, pattern):
for s, p in zip(sequence, pattern):
if p == 'N':
continue
if s not in PositionWeightMatrix.symbols[p]:
return False
return True
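# Illustrative: 'N' in the pattern matches anything and IUPAC symbols match
# their degenerate sets, so match_consensus("ACGT", "ANGT") is True while
# match_consensus("ACGT", "ACGA") is False.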
|
bxlab/bx-python
|
lib/bx/pwm/position_weight_matrix.py
|
Python
|
mit
| 30,674
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0016_auto_20150304_0113'),
]
operations = [
migrations.AddField(
model_name='comentario',
name='responder',
field=models.ForeignKey(blank=True, to='web.comentario', null=True),
preserve_default=True,
),
]
|
palichis/elmolino
|
web/migrations/0017_comentario_responder.py
|
Python
|
gpl-2.0
| 468
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import datetime, timedelta
import decimal
class QCUWeatherBasedRebalancing(QCAlgorithm):
'''Initialize: Storage for our custom data:
Source: http://www.wunderground.com/history/
Make sure to link to the actual file download URL if using dropbox.
url = "https://dl.dropboxusercontent.com/u/44311500/KNYC.csv"'''
def Initialize(self):
self.SetStartDate(2013,1,1) #Set Start Date
self.SetEndDate(2016,1,1) #Set End Date
self.SetCash(25000) #Set Strategy Cash
self.AddEquity("SPY", Resolution.Daily)
self.symbol = self.Securities["SPY"].Symbol
# KNYC is NYC Central Park. Find other locations at
# https://www.wunderground.com/history/
self.AddData(Weather, "KNYC", Resolution.Minute)
self.weather = self.Securities["KNYC"].Symbol
self.tradingDayCount = 0
self.rebalanceFrequency = 10
# When we have a new event trigger, buy some stock:
def OnData(self, data):
if self.weather not in data: return
# Scale from -5C to +25C :: -5C == 100%, +25C = 0% invested
        fraction = (25 - data[self.weather].MinC) / 30
        # self.Debug("Fraction {0}".format(fraction))
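        # Worked example (illustrative): MinC = 10C gives
        # fraction = (25 - 10) / 30 = 0.5, i.e. 50% invested.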
# Rebalance every 10 days:
if self.tradingDayCount >= self.rebalanceFrequency:
self.SetHoldings(self.symbol, fraction)
self.tradingDayCount = 0
def OnEndOfDay(self):
self.tradingDayCount += 1
class Weather(PythonData):
    '''Custom daily weather data type (wunderground history CSV)'''
def GetSource(self, config, date, isLive):
source = "https://dl.dropboxusercontent.com/u/44311500/KNYC.csv"
source = "https://www.wunderground.com/history/airport/{0}/{1}/1/1/CustomHistory.html?dayend=31&monthend=12&yearend={1}&format=1".format(config.Symbol, date.year);
return SubscriptionDataSource(source, SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLive):
# If first character is not digit, pass
if not (line.strip() and line[0].isdigit()): return None
data = line.split(',')
weather = Weather()
weather.Symbol = config.Symbol
weather.Time = datetime.strptime(data[0], '%Y-%m-%d') + timedelta(hours=20) # Make sure we only get this data AFTER trading day - don't want forward bias.
weather.Value = decimal.Decimal(data[2])
weather["MaxC"] = float(data[1])
weather["MinC"] = float(data[3])
return weather
|
tomhunter-gh/Lean
|
Algorithm.Python/QCUWeatherBasedRebalancing.py
|
Python
|
apache-2.0
| 3,595
|
from twython import Twython
from config import APP_KEY, APP_SECRET
def obtain_auth_url():
"""Used to app to tweet to my account
NOT CALLED ANYWHERE"""
twitter = Twython(APP_KEY, APP_SECRET)
auth = twitter.get_authentication_tokens()
oauth_token = auth['oauth_token']
oauth_token_secret = auth['oauth_token_secret']
print "\n\n\nGo to the following URL to authorize app:"
print auth['auth_url']
oauth_verifier = raw_input("\nEnter the pin: ")
twitter = Twython(APP_KEY, APP_SECRET, oauth_token, oauth_token_secret)
authorized = twitter.get_authorized_tokens(oauth_verifier)
    # write confirmed tokens to disk as importable module-level names
    with open("config.py", "a") as config_file:
        config_file.write("\nOAUTH_TOKEN = '" + authorized['oauth_token']
            + "'\nOAUTH_TOKEN_SECRET = '" + authorized['oauth_token_secret'] + "'")
obtain_auth_url()
|
BWeatherMaine/WXGIF
|
twitter.py
|
Python
|
apache-2.0
| 885
|
#!/usr/bin/env python
# encoding: utf-8
"""
permissions.py
Created by Frédéric LASNIER / Christophe VAN FRACKEM on 2014/02/02/.
Copyright (c) 2014 Où & Quand. All rights reserved.
Mecanicadom permission.
"""
__author__ = 'Frédéric LASNIER <fred@ouetquand.biz> / Christophe VAN FRACKEM <contact@tisspage.fr>'
__version__ = '0.0.1'
__copyright__ = '© 2014 Où & Quand Pour Mécanicadom'
# Import #
from rest_framework.permissions import BasePermission, SAFE_METHODS
from django.contrib.auth.models import User
# Utilisateur (the project's custom user model) is used throughout this module
# but was never imported; the module path below is assumed from the app layout.
from api_v0.models import Utilisateur
class AdressePerm(BasePermission):
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
#Permission for detail object
def has_object_permission(self, request, view, obj):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') or (obj.created_by == util) :
return True
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
return obj.created_by == request.user
#Permission for list object
def has_permission(self, request, view):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') :
return True
else:
return False
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
else:
return False
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
return True
class UtilisateurPerm(BasePermission):
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
#Permission for detail object
def has_object_permission(self, request, view, obj):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') or (obj.created_by == util) :
return True
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
return obj.created_by == request.user
#Permission for list object
def has_permission(self, request, view):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') :
return True
else:
return False
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
else:
return False
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
return True
class VehiculePerm(BasePermission):
SAFE_METHODS = ['GET', 'HEAD', 'OPTIONS']
#Permission for detail object
def has_object_permission(self, request, view, obj):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') or (obj.created_by == util) :
return True
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
return obj.created_by == request.user
#Permission for list object
def has_permission(self, request, view):
utilisateur = Utilisateur.objects.filter(username = request.user.username)
util = ''
if len(utilisateur) > 0:
util = Utilisateur.objects.get(username = request.user.username)
if request.method in self.SAFE_METHODS:
return True
elif request.method == 'DELETE':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
elif request.method == 'PUT':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo') :
return True
else:
return False
elif request.method == 'POST':
if util != '':
if (util.usr_cat == 'admin-tech') or (util.usr_cat == 'admin-ceo'):
return True
else:
return False
elif request.method == 'PATCH':
if util != '':
if util.usr_cat == 'admin-tech':
return True
else:
return False
return True
|
tisspage/ucar
|
api-server/api_v0/permissions.py
|
Python
|
mit
| 6,262
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("core", "doctype", "sms_parameter")
sms_sender_name = frappe.db.get_single_value("SMS Settings", "sms_sender_name")
if sms_sender_name:
frappe.reload_doc("core", "doctype", "sms_settings")
sms_settings = frappe.get_doc("SMS Settings")
sms_settings.append("parameters", {
"parameter": "sender_name",
"value": sms_sender_name
})
sms_settings.flags.ignore_mandatory = True
sms_settings.flags.ignore_permissions = True
sms_settings.save()
|
vjFaLk/frappe
|
frappe/patches/v9_1/add_sms_sender_name_as_parameters.py
|
Python
|
mit
| 651
|
#This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import os
import sys
import imp
import gettext
_ = gettext.gettext
PLUGINS_PATH = os.path.dirname(__file__)
if not os.path.isdir(PLUGINS_PATH):
# try for py2exe
PLUGINS_PATH = os.path.join(os.path.abspath(os.path.normpath(
os.path.dirname(sys.argv[0]))), 'plugins')
MODULES = []
def register():
global MODULES
if os.path.isdir(PLUGINS_PATH):
for plugin in os.listdir(PLUGINS_PATH):
if not os.path.isdir(os.path.join(PLUGINS_PATH, plugin)):
continue
module = os.path.splitext(plugin)[0]
try:
module = imp.load_module(module, *imp.find_module(module,
[PLUGINS_PATH]))
MODULES.append(module)
except ImportError:
continue
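# Illustrative: with a layout like plugins/myplugin/__init__.py, register()
# imports "myplugin" and appends the module object to MODULES; directories
# that fail to import are silently skipped.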
|
sunny414/tryton-client
|
tryton/plugins/__init__.py
|
Python
|
gpl-3.0
| 932
|
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
from flask import Flask, render_template, request, redirect
import subprocess
from Utils import subprocess_helpers
from Utils.DataSource import *
app = Flask(__name__)
dataSource = DataSource()
def launch_preprocessors():
process = subprocess.Popen(
subprocess_helpers.python_path + " Daemons/QueryProcessorDaemon.py && " + subprocess_helpers.python_path + " Daemons/ArticleProcessorDaemon.py",
executable=subprocess_helpers.executable, shell=True, universal_newlines=True)
@app.route("/", methods=["GET"])
def queries():
# Get lists of query from database with counts of associated articles
all_queries = dataSource.queries_route()
queries_formatted = [{"id": q[0], "subject": q[1], "verb": q[2], "direct_obj": q[3], "indirect_obj": q[4],
"loc": q[5], "article_count": q[6]} for q in all_queries]
return render_template("queries.html", queries=queries_formatted)
@app.route("/query", methods=["POST"])
def new_query():
# TODO: server side validation
subject = request.form["subject"]
verb = request.form["verb"]
direct_obj = request.form["direct-object"]
indirect_obj = request.form["indirect-object"]
loc = request.form["location"]
email = request.form["user-email"]
phone = request.form["user-phone"]
# Put into database
dataSource.new_query(email, phone, subject, verb, direct_obj, indirect_obj, loc)
return redirect("/")
@app.route("/query/<query_id>", methods=["GET"])
def query(query_id):
# find query by id
# if we don't find a query with that id, 404
articles, db_query = dataSource.query_route(query_id)
if db_query is not None:
articles_formatted = [{"title": a[0], "source": a[1], "url": a[2]} for a in articles]
query_formatted = {"id": db_query[0], "subject": db_query[1], "verb": db_query[2],
"direct_obj": db_query[3], "indirect_obj": db_query[4], "loc": db_query[5]}
return render_template("query.html", query=query_formatted, articles=articles_formatted)
return render_template("404.html"), 404
@app.route("/articles", methods=["GET"])
def articles():
articles = dataSource.articles_route()
articles_formatted = [{"title": a[0], "source": a[1], "url": a[2]} for a in articles]
return render_template("articles.html", articles=articles_formatted)
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
if __name__ == "__main__":
app.run(debug=True)
|
beallej/event-detection
|
WebApp/EventDetectionWeb.py
|
Python
|
mit
| 2,619
|
import re, logging, random, time
from autotest.client.shared import error
from virttest import qemu_monitor, utils_test, utils_misc
@error.context_aware
def run_balloon_check(test, params, env):
"""
    Check memory ballooning; sizes are compared in MB throughout this script:
1) Boot a guest with balloon enabled/disabled.
2) Check whether monitor command 'info balloon' works
    3) Reduce memory to a random size between free memory and max memory size
4) Run optional test after evicting memory (Optional)
5) Reset memory value to original memory assigned on qemu (Optional)
6) Run optional test after enlarging memory (Optional)
7) Check memory after sub test
8) Check whether the memory is set up correctly
@param test: QEMU test object
@param params: Dictionary with the test parameters
@param env: Dictionary with test environment.
"""
def error_report(step, expect_value, monitor_value, guest_value,
guest_changed=None, ori_value=None):
"""
Generate the error report
@param step: the step of the error happen
@param expect_value: memory size assign to the vm
@param monitor_value: memory size report from monitor, this value can
be None
@param guest_value: memory size report from guest, this value can be
None
@param ori_value: memory size in qemu command line
"""
logging.error("Memory size mismatch %s:\n" % step)
if guest_changed is not None:
error_msg = "Wanted to be changed: %s\n" % (ori_value
- expect_value)
if monitor_value:
error_msg += "Changed in monitor: %s\n" % (ori_value
- monitor_value)
error_msg += "Changed in guest: %s\n" % guest_changed
else:
error_msg = "Assigner to VM: %s\n" % expect_value
if monitor_value:
error_msg += "Reported by monitor: %s\n" % monitor_value
if guest_value:
error_msg += "Reported by guest OS: %s\n" % guest_value
logging.error(error_msg)
def check_ballooned_memory():
"""
Verify the actual memory reported by monitor command info balloon. If
the operation failed, increase the failure counter.
@return: Number of failures occurred during operation.
"""
try:
output = vm.monitor.info("balloon")
ballooned_mem = int(re.findall("\d+", str(output))[0])
if vm.monitor.protocol == "qmp":
ballooned_mem *= 1024 ** -2
except qemu_monitor.MonitorError, e:
logging.error(e)
return 0
return ballooned_mem
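    # Note (illustrative): QMP reports the balloon size in bytes, hence the
    # 1024 ** -2 conversion above, e.g. 1073741824 bytes -> 1024.0 MB.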
def get_memory_status():
"""
Get Memory status inside guest. As memory balloon shows different
results in Windows and Linux guests. So use different method for
them.
@return: Number of failures occurred during operation and the memory
size.
"""
try:
if params["os_type"] == "windows":
# In Windows guest we get the free memory for memory compare
memory = session.cmd_output(free_mem_cmd)
memory = int(re.findall("\d+", memory)[0])
memory *= 1024 ** -1
else:
memory = vm.get_current_memory_size()
except Exception, e:
logging.error(e)
return 0
return memory
def memory_check(step, ballooned_mem, ori_mmem, ori_gmem, ratio):
"""
        Check memory status against expected values
@param step: the check point string
@param ballooned_mem: ballooned memory in current step
@param ori_mmem: original memory size get from monitor
@param ori_gmem: original memory size get from guest
@param ratio: the ratio that can accept when check results
"""
error.context("Check memory status %s" % step, logging.info)
mmem = check_ballooned_memory()
gmem = get_memory_status()
if (abs(mmem - ori_mmem) != ballooned_mem
or (abs(gmem - ori_gmem) < ratio * ballooned_mem)):
if params["os_type"] == "windows":
error_report(step, ori_mmem - ballooned_mem, mmem, None,
abs(gmem - ori_gmem), ori_mmem)
else:
error_report(step, ori_mmem - ballooned_mem, mmem, gmem)
raise error.TestFail("Balloon test failed %s" % step)
def balloon_memory(new_mem):
"""
        Balloon memory to new_mem and verify on both qemu monitor and
        guest OS that the change worked.
@param new_mem: New desired memory.
"""
error.context("Change VM memory to %s" % new_mem, logging.info)
compare_mem = new_mem
if params["monitor_type"] == "qmp":
new_mem = new_mem * 1024 * 1024
# This should be replaced by proper monitor method call
vm.monitor.send_args_cmd("balloon value=%s" % new_mem)
balloon_timeout = float(params.get("balloon_timeout", 100))
s = utils_misc.wait_for((lambda: compare_mem
== check_ballooned_memory()),
balloon_timeout)
if s is None:
        raise error.TestFail("Failed to balloon memory to the expected"
                             " value within %ss" % balloon_timeout)
free_mem_cmd = params["free_mem_cmd"]
ratio = float(params.get("ratio", 0.5))
vm = env.get_vm(params["main_vm"])
error.context("Boot a guest", logging.info)
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
# Upper limit that we can raise the memory
vm_assigned_mem = int(params["mem"])
error.context("Check the memory in guest", logging.info)
boot_mem = vm.get_memory_size()
error.context("Check whether monitor command 'info balloon' works")
monitor_boot_mem = check_ballooned_memory()
if boot_mem != vm_assigned_mem or monitor_boot_mem != vm_assigned_mem:
error_report("check memory before test", vm_assigned_mem,
monitor_boot_mem, boot_mem)
        raise error.TestError("Memory status is incorrect after guest boot"
                              " up, aborting the test")
if monitor_boot_mem:
logging.info("Current VM memory according to ballooner: %s",
monitor_boot_mem)
guest_boot_mem = get_memory_status()
error.context("Reduce memory to random size between Free memory"
"to max memory size", logging.info)
s, o = session.cmd_status_output(free_mem_cmd)
if s != 0:
raise error.TestError("Can not get guest memory information")
vm_mem_free = int(re.findall('\d+', o)[0]) / 1024
new_mem = int(random.uniform(vm_assigned_mem - vm_mem_free,
vm_assigned_mem))
balloon_memory(new_mem)
ballooned_mem = vm_assigned_mem - new_mem
memory_check("after evict memory", ballooned_mem,
monitor_boot_mem, guest_boot_mem, ratio)
if (params.get("run_evict_sub_test", "no") == "yes"
and 'sub_balloon_test_evict' in params):
error.context("Run optional test after evicting memory", logging.info)
balloon_test = params['sub_balloon_test_evict']
utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test)
        if balloon_test == "shutdown":
logging.info("Guest shutdown normally after balloon")
return
if params.get("session_need_update", "no") == "yes":
session = vm.wait_for_login(timeout=timeout)
if params.get("qemu_quit_after_sub_case", "no") == "yes":
ballooned_mem = 0
memory_check("after subtest when evicting memory", ballooned_mem,
monitor_boot_mem, guest_boot_mem, ratio)
error.context("Enlarge memory to random size between current memory to"
" max memory size", logging.info)
# This will ensure we won't trigger guest OOM killer while running
# multiple iterations.
expect_mem = int(random.uniform(new_mem, vm_assigned_mem))
balloon_memory(expect_mem)
ballooned_mem = vm_assigned_mem - expect_mem
memory_check("after enlarge memory", ballooned_mem,
monitor_boot_mem, guest_boot_mem, ratio)
if (params.get("run_enlarge_sub_test", "no") == "yes"
and 'sub_balloon_test_enlarge' in params):
error.context("Run optional test after enlarging memory",
logging.info)
balloon_test = params['sub_balloon_test_enlarge']
utils_test.run_virt_sub_test(test, params, env, sub_type=balloon_test)
        if balloon_test == "shutdown":
logging.info("Guest shutdown normally after balloon")
return
if params.get("session_need_update", "no") == "yes":
session = vm.wait_for_login(timeout=timeout)
if params.get("qemu_quit_after_sub_case", "no") == "yes":
ballooned_mem = 0
memory_check("after subtest when enlarging memory", ballooned_mem,
                 monitor_boot_mem, guest_boot_mem, ratio)
session.close()
|
sathnaga/virt-test
|
qemu/tests/balloon_check.py
|
Python
|
gpl-2.0
| 9,450
|
import argparse
import json
import pprint
import sys
import urllib
import urllib2
import oauth2
API_HOST = 'api.yelp.com'
DEFAULT_TERM = 'dinner'
DEFAULT_LOCATION = 'San Francisco, CA'
SEARCH_LIMIT = 3
SEARCH_PATH = '/v2/search/'
BUSINESS_PATH = '/v2/business/'
# OAuth credential placeholders that must be filled in by users.
CONSUMER_KEY = None
CONSUMER_SECRET = None
TOKEN = None
TOKEN_SECRET = None
def request(host, path, url_params=None):
"""Prepares OAuth authentication and sends the request to the API.
Args:
host (str): The domain host of the API.
path (str): The path of the API after the domain.
url_params (dict): An optional set of query parameters in the request.
Returns:
dict: The JSON response from the request.
Raises:
urllib2.HTTPError: An error occurs from the HTTP request.
"""
url_params = url_params or {}
url = 'http://{0}{1}?'.format(host, urllib.quote(path.encode('utf8')))
consumer = oauth2.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
oauth_request = oauth2.Request(method="GET", url=url, parameters=url_params)
oauth_request.update(
{
'oauth_nonce': oauth2.generate_nonce(),
'oauth_timestamp': oauth2.generate_timestamp(),
'oauth_token': TOKEN,
'oauth_consumer_key': CONSUMER_KEY
}
)
token = oauth2.Token(TOKEN, TOKEN_SECRET)
oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, token)
signed_url = oauth_request.to_url()
print u'Querying {0} ...'.format(url)
conn = urllib2.urlopen(signed_url, None)
try:
response = json.loads(conn.read())
finally:
conn.close()
return response
def search(term, location):
"""Query the Search API by a search term and location.
Args:
term (str): The search term passed to the API.
location (str): The search location passed to the API.
Returns:
dict: The JSON response from the request.
"""
url_params = {
'term': term.replace(' ', '+'),
'location': location.replace(' ', '+'),
'limit': SEARCH_LIMIT
}
return request(API_HOST, SEARCH_PATH, url_params=url_params)
def get_business(business_id):
"""Query the Business API by a business ID.
Args:
business_id (str): The ID of the business to query.
Returns:
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
return request(API_HOST, business_path)
def query_api(term, location):
"""Queries the API by the input values from the user.
Args:
term (str): The search term to query.
location (str): The location of the business to query.
"""
response = search(term, location)
businesses = response.get('businesses')
if not businesses:
print u'No businesses for {0} in {1} found.'.format(term, location)
return
business_id = businesses[0]['id']
print u'{0} businesses found, querying business info for the top result "{1}" ...'.format(
len(businesses),
business_id
)
response = get_business(business_id)
print u'Result for business "{0}" found:'.format(business_id)
pprint.pprint(response, indent=2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM, type=str, help='Search term (default: %(default)s)')
parser.add_argument('-l', '--location', dest='location', default=DEFAULT_LOCATION, type=str, help='Search location (default: %(default)s)')
input_values = parser.parse_args()
try:
query_api(input_values.term, input_values.location)
except urllib2.HTTPError as error:
sys.exit('Encountered HTTP error {0}. Abort program.'.format(error.code))
if __name__ == '__main__':
main()
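# Example invocation (illustrative; requires the OAuth credentials above to
# be filled in):
#   python yelp.py --term "coffee" --location "Portland, OR"
# prints the top matching business returned by the Search API.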
|
tosfan4ever/hacktheplanet
|
api/utils/yelp.py
|
Python
|
bsd-3-clause
| 3,892
|
# encoding: utf-8
# module gio._gio
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gio/_gio.so
# by generator 1.135
# no doc
# imports
import gio as __gio
import glib as __glib
import gobject as __gobject
import gobject._gobject as __gobject__gobject
class FileMonitorEvent(__gobject.GEnum):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
}
__gtype__ = None # (!) real value is ''
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/gio/_gio/FileMonitorEvent.py
|
Python
|
gpl-2.0
| 785
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class GmDashConfig(AppConfig):
name = 'gm_dash'
|
Gamification-Dashboard/gamification-dashboard
|
dashboard/gm_dash/apps.py
|
Python
|
gpl-3.0
| 153
|
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from quantum.common import utils
from quantum.db import model_base
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36), primary_key=True, default=utils.str_uuid)
class IPAvailabilityRange(model_base.BASEV2):
"""Internal representation of available IPs for Quantum subnets.
Allocation - first entry from the range will be allocated.
If the first entry is equal to the last entry then this row
will be deleted.
Recycling ips involves appending to existing ranges. This is
only done if the range is contiguous. If not, the first_ip will be
the same as the last_ip. When adjacent ips are recycled the ranges
will be merged.
"""
allocation_pool_id = sa.Column(sa.String(36),
sa.ForeignKey('ipallocationpools.id',
ondelete="CASCADE"),
nullable=True,
primary_key=True)
first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True)
def __repr__(self):
return "%s - %s" % (self.first_ip, self.last_ip)
class IPAllocationPool(model_base.BASEV2, HasId):
"""Representation of an allocation pool in a Quantum subnet."""
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=True)
first_ip = sa.Column(sa.String(64), nullable=False)
last_ip = sa.Column(sa.String(64), nullable=False)
available_ranges = orm.relationship(IPAvailabilityRange,
backref='ipallocationpool',
lazy="dynamic")
def __repr__(self):
return "%s - %s" % (self.first_ip, self.last_ip)
class IPAllocation(model_base.BASEV2):
"""Internal representation of allocated IP addresses in a Quantum subnet.
"""
port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id',
ondelete="CASCADE"),
nullable=True)
ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
nullable=False, primary_key=True)
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id",
ondelete="CASCADE"),
nullable=False, primary_key=True)
expiration = sa.Column(sa.DateTime, nullable=True)
class Port(model_base.BASEV2, HasId, HasTenant):
"""Represents a port on a quantum v2 network."""
name = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id"),
nullable=False)
fixed_ips = orm.relationship(IPAllocation, backref='ports', lazy="dynamic")
mac_address = sa.Column(sa.String(32), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
status = sa.Column(sa.String(16), nullable=False)
device_id = sa.Column(sa.String(255), nullable=False)
device_owner = sa.Column(sa.String(255), nullable=False)
class DNSNameServer(model_base.BASEV2):
"""Internal representation of a DNS nameserver."""
address = sa.Column(sa.String(128), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
class Route(model_base.BASEV2):
"""Represents a route for a subnet or port."""
destination = sa.Column(sa.String(64), nullable=False, primary_key=True)
nexthop = sa.Column(sa.String(64), nullable=False, primary_key=True)
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id',
ondelete="CASCADE"),
primary_key=True)
class Subnet(model_base.BASEV2, HasId, HasTenant):
"""Represents a quantum subnet.
When a subnet is created the first and last entries will be created. These
are used for the IP allocation.
"""
name = sa.Column(sa.String(255))
network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id'))
ip_version = sa.Column(sa.Integer, nullable=False)
cidr = sa.Column(sa.String(64), nullable=False)
gateway_ip = sa.Column(sa.String(64))
allocation_pools = orm.relationship(IPAllocationPool,
backref='subnet',
lazy="dynamic")
enable_dhcp = sa.Column(sa.Boolean())
dns_nameservers = orm.relationship(DNSNameServer,
backref='subnet',
cascade='delete')
routes = orm.relationship(Route,
backref='subnet',
cascade='delete')
shared = sa.Column(sa.Boolean)
class Network(model_base.BASEV2, HasId, HasTenant):
"""Represents a v2 quantum network."""
name = sa.Column(sa.String(255))
ports = orm.relationship(Port, backref='networks')
subnets = orm.relationship(Subnet, backref='networks')
status = sa.Column(sa.String(16))
admin_state_up = sa.Column(sa.Boolean)
shared = sa.Column(sa.Boolean)
|
FreescaleSemiconductor/quantum
|
quantum/db/models_v2.py
|
Python
|
apache-2.0
| 6,432
|
# The MIT License (MIT)
#
# Copyright (c) 2013 Marek Mikuliszyn
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.conf.urls import patterns, url
urlpatterns = patterns('writer.views',
url(r'^$', 'index', name='new_signal'),
url(r'^bar$', 'bar', name='new_bar'),
url(r'^update$', 'update', name='trade_update'),
url(r'^ping$', 'ping', name='ping'),
url(r'^pair$', 'pair', name='pair_update'),
)
|
yezooz/fserver
|
writer/urls.py
|
Python
|
mit
| 1,551
|
"""
================
Sequential Usage
================
By default, *auto-sklearn* fits the machine learning models and build their
ensembles in parallel. However, it is also possible to run the two processes
sequentially. The example below shows how to first fit the models and build the
ensembles afterwards.
"""
from pprint import pprint
import sklearn.model_selection
import sklearn.datasets
import sklearn.metrics
import autosklearn.classification
############################################################################
# Data Loading
# ======================================
X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = \
sklearn.model_selection.train_test_split(X, y, random_state=1)
############################################################################
# Build and fit the classifier
# ======================================
automl = autosklearn.classification.AutoSklearnClassifier(
time_left_for_this_task=120,
per_run_time_limit=30,
tmp_folder='/tmp/autosklearn_sequential_example_tmp',
# Do not construct ensembles in parallel to avoid using more than one
# core at a time. The ensemble will be constructed after auto-sklearn
    # has finished fitting all machine learning models.
ensemble_size=0,
delete_tmp_folder_after_terminate=False,
)
automl.fit(X_train, y_train, dataset_name='breast_cancer')
# This call to fit_ensemble uses all models trained in the previous call
# to fit to build an ensemble which can be used with automl.predict()
automl.fit_ensemble(y_train, ensemble_size=50)
############################################################################
# Print the final ensemble constructed by auto-sklearn
# ====================================================
pprint(automl.show_models(), indent=4)
############################################################################
# Get the Score of the final ensemble
# ===================================
predictions = automl.predict(X_test)
print(automl.sprint_statistics())
print("Accuracy score", sklearn.metrics.accuracy_score(y_test, predictions))
|
automl/auto-sklearn
|
examples/60_search/example_sequential.py
|
Python
|
bsd-3-clause
| 2,132
|
# Project Euler 44: find pentagonal numbers Pj and Pk whose sum and
# difference are both pentagonal, and print the difference.
import sys
from itertools import count
def pentagonal(n):
    return n * (3 * n - 1) // 2
numbers = []   # pentagonal numbers generated so far, in order
seen = set()   # the same values, for O(1) membership tests
for s in (pentagonal(n) for n in count(1)):
    numbers.append(s)
    seen.add(s)
    # Treat s as the (pentagonal) sum Pj + Pk and look for a split
    # whose difference Pk - Pj is also pentagonal.
    for pk in numbers:
        if pk >= s:
            break
        pj = s - pk
        d = pk - pj
        if d > 0 and pj in seen and d in seen:
            print(d)
            sys.exit()
|
davidxmoody/kata
|
project-euler/completed/euler44.py
|
Python
|
mit
| 524
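For reference, pentagonality can also be tested in O(1) by inverting P(n) = n(3n-1)/2: x is pentagonal iff 1 + 24x is a perfect square whose root is congruent to 5 mod 6. A small standalone sketch, assuming Python 3.8+ for math.isqrt:

import math

def is_pentagonal(x):
    # P(n) = n(3n-1)/2  =>  1 + 24x = (6n-1)^2, and 6n-1 == 5 (mod 6)
    if x < 1:
        return False
    r = math.isqrt(1 + 24 * x)
    return r * r == 1 + 24 * x and r % 6 == 5

assert all(is_pentagonal(p) for p in (1, 5, 12, 22))  # P(1)..P(4)
assert not is_pentagonal(23)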
|
""" DowntimeCommand module will look into GOC DB to find announced downtimes for RSS-managed sites and resources.
If found, downtimes are added to the internal RSS cache using ResourceManagementClient.
GOCDB downtimes that are modified or deleted are also synced.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
from six.moves.urllib_error import URLError
import re
from datetime import datetime, timedelta
from operator import itemgetter
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.Utilities.SiteSEMapping import getSEHosts, getStorageElementsHosts
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getFTS3Servers, getGOCSiteName,\
getGOCSites, getGOCFTSName, getCESiteMapping
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
class DowntimeCommand(Command):
"""
Downtime "master" Command or removed DTs.
"""
def __init__(self, args=None, clients=None):
super(DowntimeCommand, self).__init__(args, clients)
if 'GOCDBClient' in self.apis:
self.gClient = self.apis['GOCDBClient']
else:
self.gClient = GOCDBClient()
if 'ResourceManagementClient' in self.apis:
self.rmClient = self.apis['ResourceManagementClient']
else:
self.rmClient = ResourceManagementClient()
def _storeCommand(self, result):
"""
Stores the results of doNew method on the database.
"""
for dt in result:
resQuery = self.rmClient.addOrModifyDowntimeCache(downtimeID=dt['DowntimeID'],
element=dt['Element'],
name=dt['Name'],
startDate=dt['StartDate'],
endDate=dt['EndDate'],
severity=dt['Severity'],
description=dt['Description'],
link=dt['Link'],
gOCDBServiceType=dt['gOCDBServiceType'])
return resQuery
def _cleanCommand(self, element, elementNames):
"""
Clear Cache from expired DT.
"""
resQuery = []
for elementName in elementNames:
# get the list of all DTs stored in the cache
result = self.rmClient.selectDowntimeCache(element=element,
name=elementName)
if not result['OK']:
return result
uniformResult = [dict(zip(result['Columns'], res)) for res in result['Value']]
currentDate = datetime.utcnow()
if not uniformResult:
continue
# get the list of all ongoing DTs from GocDB
gDTLinkList = self.gClient.getCurrentDTLinkList()
if not gDTLinkList['OK']:
return gDTLinkList
for dt in uniformResult:
# if DT expired or DT not in the list of current DTs, then we remove it from the cache
if dt['EndDate'] < currentDate or dt['Link'] not in gDTLinkList['Value']:
result = self.rmClient.deleteDowntimeCache(downtimeID=dt['DowntimeID'])
resQuery.append(result)
return S_OK(resQuery)
def _prepareCommand(self):
"""
DowntimeCommand requires four arguments:
- name : <str>
- element : Site / Resource
- elementType: <str>
    If the elements are Site(s), we need to get their GOCDB names. They may
    not have one; if so, the name is left unchanged.
"""
if 'name' not in self.args:
return S_ERROR('"name" not found in self.args')
elementName = self.args['name']
if 'element' not in self.args:
return S_ERROR('"element" not found in self.args')
element = self.args['element']
if 'elementType' not in self.args:
return S_ERROR('"elementType" not found in self.args')
elementType = self.args['elementType']
if element not in ['Site', 'Resource']:
return S_ERROR('element is neither Site nor Resource')
hours = None
if 'hours' in self.args:
hours = self.args['hours']
gOCDBServiceType = None
# Transform DIRAC site names into GOCDB topics
if element == 'Site':
gocSite = getGOCSiteName(elementName)
      if not gocSite['OK']:  # The site most probably is not a grid site - not an issue, of course
pass # so, elementName remains unchanged
else:
elementName = gocSite['Value']
    # The DIRAC SE names mean nothing on the grid, but their hosts do.
elif elementType == 'StorageElement':
# for SRM and SRM only, we need to distinguish if it's tape or disk
# if it's not SRM, then gOCDBServiceType will be None (and we'll use them all)
try:
se = StorageElement(elementName)
seOptions = se.options
seProtocols = set(se.localAccessProtocolList) | set(se.localWriteProtocolList)
except AttributeError: # Sometimes the SE can't be instantiated properly
self.log.error("Failure instantiating StorageElement object", elementName)
return S_ERROR("Failure instantiating StorageElement")
if 'SEType' in seOptions and 'srm' in seProtocols:
# Type should follow the convention TXDY
seType = seOptions['SEType']
diskSE = re.search('D[1-9]', seType) is not None
tapeSE = re.search('T[1-9]', seType) is not None
if tapeSE:
gOCDBServiceType = "srm.nearline"
elif diskSE:
gOCDBServiceType = "srm"
res = getSEHosts(elementName)
if not res['OK']:
return res
seHosts = res['Value']
if not seHosts:
return S_ERROR('No seHost(s) for %s' % elementName)
      elementName = seHosts  # in this case it will return a list, because there might be more than one host
elif elementType in ['FTS', 'FTS3']:
gOCDBServiceType = 'FTS'
# WARNING: this method presupposes that the server is an FTS3 type
gocSite = getGOCFTSName(elementName)
if not gocSite['OK']:
self.log.warn("FTS not in Resources/FTSEndpoints/FTS3 ?", elementName)
else:
elementName = gocSite['Value']
return S_OK((element, elementName, hours, gOCDBServiceType))
def doNew(self, masterParams=None):
"""
Gets the parameters to run, either from the master method or from its
own arguments.
    For every elementName (or list of them), it contacts the GOCDB client.
    The server is not very stable, so in case of failure it tries a second
    time.
    If there are downtimes, they are recorded and then returned.
"""
if masterParams is not None:
element, elementNames = masterParams
hours = 120
elementName = None
gOCDBServiceType = None
else:
params = self._prepareCommand()
if not params['OK']:
return params
element, elementName, hours, gOCDBServiceType = params['Value']
if not isinstance(elementName, list):
elementNames = [elementName]
else:
elementNames = elementName
# WARNING: checking all the DT that are ongoing or starting in given <hours> from now
try:
results = self.gClient.getStatus(element, name=elementNames, startingInHours=hours)
except URLError:
try:
# Let's give it a second chance..
results = self.gClient.getStatus(element, name=elementNames, startingInHours=hours)
except URLError as e:
return S_ERROR(e)
if not results['OK']:
return results
results = results['Value']
if results is None: # no downtimes found
return S_OK(None)
# cleaning the Cache
if elementNames:
cleanRes = self._cleanCommand(element, elementNames)
if not cleanRes['OK']:
return cleanRes
uniformResult = []
# Humanize the results into a dictionary, not the most optimal, but readable
for downtime, downDic in results.items(): # can be an iterator
dt = {}
dt['Name'] = downDic.get('URL', downDic.get('HOSTNAME', downDic.get('SITENAME')))
if not dt['Name']:
return S_ERROR("URL, SITENAME and HOSTNAME are missing from downtime dictionary")
dt['gOCDBServiceType'] = downDic.get('SERVICE_TYPE')
if dt['gOCDBServiceType'] and gOCDBServiceType:
if gOCDBServiceType.lower() != downDic['SERVICE_TYPE'].lower():
self.log.warn("SERVICE_TYPE mismatch",
"between GOCDB (%s) and CS (%s) for %s" % (gOCDBServiceType,
downDic['SERVICE_TYPE'],
dt['Name']))
dt['DowntimeID'] = downtime
dt['Element'] = element
dt['StartDate'] = downDic['FORMATED_START_DATE']
dt['EndDate'] = downDic['FORMATED_END_DATE']
dt['Severity'] = downDic['SEVERITY']
dt['Description'] = downDic['DESCRIPTION'].replace('\'', '')
dt['Link'] = downDic['GOCDB_PORTAL_URL']
uniformResult.append(dt)
storeRes = self._storeCommand(uniformResult)
if not storeRes['OK']:
return storeRes
return S_OK()
def doCache(self):
"""
    Method that reads the cache table and tries to get a result from it. It
    will return the single dictionary describing the DT, if there are results.
"""
params = self._prepareCommand()
if not params['OK']:
return params
element, elementName, hours, gOCDBServiceType = params['Value']
result = self.rmClient.selectDowntimeCache(element=element, name=elementName,
gOCDBServiceType=gOCDBServiceType)
if not result['OK']:
return result
uniformResult = [dict(zip(result['Columns'], res)) for res in result['Value']]
# 'targetDate' can be either now or some 'hours' later in the future
targetDate = datetime.utcnow()
# dtOverlapping is a buffer to assure only one dt is returned
# when there are overlapping outage/warning dt for same element
# on top of the buffer we put the most recent outages
# while at the bottom the most recent warnings,
# assumption: uniformResult list is already ordered by resource/site name, severity, startdate
dtOverlapping = []
if hours is not None:
# IN THE FUTURE
targetDate = targetDate + timedelta(hours=hours)
# sorting by 'StartDate' b/c if we look for DTs in the future
# then we are interested in the earliest DTs
uniformResult.sort(key=itemgetter('Name', 'Severity', 'StartDate'))
for dt in uniformResult:
if (dt['StartDate'] < targetDate) and (dt['EndDate'] > targetDate):
# the list is already ordered in a way that outages come first over warnings
# and the earliest outages are on top of other outages and warnings
# while the earliest warnings are on top of the other warnings
        # so whatever comes first in the list is also what we are looking for
dtOverlapping = [dt]
break
else:
# IN THE PRESENT
# sorting by 'EndDate' b/c if we look for DTs in the present
# then we are interested in those DTs that last longer
uniformResult.sort(key=itemgetter('Name', 'Severity', 'EndDate'))
for dt in uniformResult:
if (dt['StartDate'] < targetDate) and (dt['EndDate'] > targetDate):
# if outage, we put it on top of the overlapping buffer
# i.e. the latest ending outage is on top
if dt['Severity'].upper() == 'OUTAGE':
dtOverlapping = [dt] + dtOverlapping
# if warning, we put it at the bottom of the overlapping buffer
# i.e. the latest ending warning is at the bottom
elif dt['Severity'].upper() == 'WARNING':
dtOverlapping.append(dt)
result = None
if dtOverlapping:
dtTop = dtOverlapping[0]
dtBottom = dtOverlapping[-1]
if dtTop['Severity'].upper() == 'OUTAGE':
result = dtTop
else:
result = dtBottom
return S_OK(result)
def doMaster(self):
""" Master method, which looks little bit spaghetti code, sorry !
- It gets all sites and transforms them into gocSites.
- It gets all the storage elements and transforms them into their hosts
        - It gets the CEs (FTS and file catalogs will come).
"""
gocSites = getGOCSites()
if not gocSites['OK']:
return gocSites
gocSites = gocSites['Value']
sesHosts = getStorageElementsHosts()
if not sesHosts['OK']:
return sesHosts
sesHosts = sesHosts['Value']
resources = sesHosts if sesHosts else []
ftsServer = getFTS3Servers(hostOnly=True)
if ftsServer['OK'] and ftsServer['Value']:
resources.extend(ftsServer['Value'])
# TODO: file catalogs need also to use their hosts
# fc = CSHelpers.getFileCatalogs()
# if fc[ 'OK' ]:
# resources = resources + fc[ 'Value' ]
res = getCESiteMapping()
if res['OK'] and res['Value']:
resources.extend(list(res['Value']))
self.log.verbose('Processing Sites', ', '.join(gocSites if gocSites else ['NONE']))
siteRes = self.doNew(('Site', gocSites))
if not siteRes['OK']:
self.metrics['failed'].append(siteRes['Message'])
self.log.verbose('Processing Resources', ', '.join(resources if resources else ['NONE']))
resourceRes = self.doNew(('Resource', resources))
if not resourceRes['OK']:
self.metrics['failed'].append(resourceRes['Message'])
return S_OK(self.metrics)
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Command/DowntimeCommand.py
|
Python
|
gpl-3.0
| 13,876
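The overlapping-DT buffer in doCache can be illustrated in isolation. A minimal standalone sketch (hypothetical data, not DIRAC code) of the 'present' branch: ongoing outages are prepended, ongoing warnings appended, and an outage at the top of the buffer wins:

from datetime import datetime, timedelta

now = datetime.utcnow()
downtimes = [  # assumed already ordered by name, severity, end date
    {'Severity': 'OUTAGE', 'StartDate': now - timedelta(hours=2),
     'EndDate': now + timedelta(hours=4)},
    {'Severity': 'WARNING', 'StartDate': now - timedelta(hours=1),
     'EndDate': now + timedelta(hours=1)},
]
overlapping = []
for dt in downtimes:
    if dt['StartDate'] < now < dt['EndDate']:
        if dt['Severity'] == 'OUTAGE':
            overlapping = [dt] + overlapping  # latest-ending outage on top
        else:
            overlapping.append(dt)            # latest-ending warning at bottom
result = None
if overlapping:
    top, bottom = overlapping[0], overlapping[-1]
    result = top if top['Severity'] == 'OUTAGE' else bottom
print(result['Severity'] if result else None)  # -> OUTAGE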
|
'''Tests about locks'''
import redis
from common import TestQless
class TestLocks(TestQless):
'''Locks tests'''
def test_malformed(self):
'''Enumerate malformed inputs into heartbeat'''
self.assertMalformed(self.lua, [
('heartbeat', 0),
('heartbeat', 0, 'jid'),
('heartbeat', 0, 'jid', 'worker', '[}')
])
def setUp(self):
TestQless.setUp(self)
# No grace period for any of these tests
self.lua('config.set', 0, 'grace-period', 0)
def test_move(self):
        '''Moving a job should expire any existing locks'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 1, 'queue', 'worker', 10)
self.lua('heartbeat', 2, 'jid', 'worker', {})
# Move the job after it's been popped
self.lua('put', 3, 'worker', 'other', 'jid', 'klass', {}, 0)
# Now this job cannot be heartbeated
self.assertRaisesRegexp(redis.ResponseError, r'waiting',
self.lua, 'heartbeat', 4, 'jid', 'worker', {})
def test_lose_lock(self):
'''When enough time passes, we lose our lock on a job'''
# Put and pop a job
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
job = self.lua('pop', 1, 'queue', 'worker', 10)[0]
# No jobs should be available since the lock is still valid
self.assertEqual(self.lua('pop', 2, 'queue', 'worker', 10), {})
self.assertEqual(self.lua(
'pop', job['expires'] + 10, 'queue', 'another', 10), [{
'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 131,
'failure': {},
'history': [
{'q': 'queue', 'what': 'put', 'when': 0},
{'what': 'popped', 'when': 1, 'worker': 'worker'},
{'what': 'timed-out', 'when': 71},
{'what': 'popped', 'when': 71, 'worker': 'another'}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'queue',
'remaining': 4,
'retries': 5,
'state': 'running',
'tags': {},
'tracked': False,
'throttles': ['ql:q:queue'],
'worker': 'another',
'spawned_from_jid': False}])
# When we try to heartbeat, it should raise an exception
self.assertRaisesRegexp(redis.ResponseError, r'given out to another',
self.lua, 'heartbeat', 1000, 'jid', 'worker', {})
def test_heartbeat(self):
'''Heartbeating extends the lock'''
# Put and pop a job
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
job = self.lua('pop', 1, 'queue', 'worker', 10)[0]
# No jobs should be available since the lock is still valid
self.assertEqual(self.lua('pop', 2, 'queue', 'worker', 10), {})
# We should see our expiration update after a heartbeat
self.assertTrue(
self.lua('heartbeat', 3, 'jid', 'worker', {}) > job['expires'])
def test_heartbeat_waiting(self):
'''Only popped jobs can be heartbeated'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertRaisesRegexp(redis.ResponseError, r'waiting',
self.lua, 'heartbeat', 1, 'jid', 'worker', {})
        # Pop it and it should work
self.lua('pop', 2, 'queue', 'worker', 10)
self.lua('heartbeat', 3, 'jid', 'worker', {})
def test_heartbeat_failed(self):
'''Cannot heartbeat a failed job'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('fail', 0, 'jid', 'worker', 'foo', 'bar', {})
self.assertRaisesRegexp(redis.ResponseError, r'failed',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
def test_heartbeat_depends(self):
'''Cannot heartbeat a dependent job'''
self.lua('put', 0, 'worker', 'queue', 'a', 'klass', {}, 0)
self.lua('put', 0, 'worker', 'queue', 'b', 'klass', {}, 0, 'depends', ['a'])
self.assertRaisesRegexp(redis.ResponseError, r'depends',
self.lua, 'heartbeat', 0, 'b', 'worker', {})
def test_heartbeat_scheduled(self):
'''Cannot heartbeat a scheduled job'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 1)
self.assertRaisesRegexp(redis.ResponseError, r'scheduled',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
def test_heartbeat_nonexistent(self):
'''Cannot heartbeat a job that doesn't exist'''
self.assertRaisesRegexp(redis.ResponseError, r'does not exist',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('heartbeat', 0, 'jid', 'worker', {})
def test_heartbeat_completed(self):
'''Cannot heartbeat a job that has been completed'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('complete', 0, 'jid', 'worker', 'queue', {})
self.assertRaisesRegexp(redis.ResponseError, r'complete',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
def test_heartbeat_wrong_worker(self):
'''Only the worker with a job's lock can heartbeat it'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 1, 'queue', 'worker', 10)
# Another worker can't heartbeat, but we can
self.assertRaisesRegexp(redis.ResponseError, r'another worker',
self.lua, 'heartbeat', 2, 'jid', 'another', {})
self.lua('heartbeat', 2, 'jid', 'worker', {})
class TestRetries(TestQless):
'''Test all the behavior surrounding retries'''
def setUp(self):
TestQless.setUp(self)
# No grace periods for this
self.lua('config.set', 0, 'grace-period', 0)
self.lua('config.set', 0, 'heartbeat', -10)
def test_basic(self):
'''The retries and remaining counters are decremented appropriately'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 5)
self.lua('pop', 0, 'queue', 'worker', 10)
job = self.lua('pop', 0, 'queue', 'another', 10)[0]
self.assertEqual(job['retries'], 5)
self.assertEqual(job['remaining'], 4)
def test_move_failed_retries(self):
'''Can move a job even if it's failed retries'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.assertEqual(self.lua('pop', 0, 'queue', 'worker', 10), {})
self.assertEqual(self.lua('get', 0, 'jid')['state'], 'failed')
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua('get', 0, 'jid')['state'], 'waiting')
def test_reset_complete(self):
'''Completing a job resets its retries counter'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 5)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua(
'complete', 0, 'jid', 'worker', 'queue', {}, 'next', 'queue')
self.assertEqual(self.lua(
'pop', 0, 'queue', 'worker', 10)[0]['remaining'], 5)
def test_reset_move(self):
'''Moving a job resets its retries counter'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 5)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('pop', 0, 'queue', 'worker', 10)
# Re-put the job without specifying retries
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertEqual(self.lua(
'pop', 0, 'queue', 'worker', 10)[0]['remaining'], 5)
class TestRetry(TestQless):
'''Test all the behavior surrounding retry'''
maxDiff = 100000
def test_malformed(self):
'''Enumerate all the malformed inputs'''
self.assertMalformed(self.lua, [
('retry', 0),
('retry', 0, 'jid'),
('retry', 0, 'jid', 'queue'),
('retry', 0, 'jid', 'queue', 'worker'),
('retry', 0, 'jid', 'queue', 'worker', 'foo'),
])
# function QlessJob:retry(now, queue, worker, delay, group, message)
def test_retry_waiting(self):
'''Cannot retry a job that's waiting'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'retry', 0, 'jid', 'queue', 'worker', 0)
def test_retry_completed(self):
'''Cannot retry a completed job'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('complete', 0, 'jid', 'worker', 'queue', {})
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'retry', 0, 'jid', 'queue', 'worker', 0)
def test_retry_failed(self):
'''Cannot retry a failed job'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('fail', 0, 'jid', 'worker', 'group', 'message', {})
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'retry', 0, 'jid', 'queue', 'worker', 0)
def test_retry_otherowner(self):
'''Cannot retry a job owned by another worker'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.assertRaisesRegexp(redis.ResponseError, r'another worker',
self.lua, 'retry', 0, 'jid', 'queue', 'another', 0)
def test_retry_complete(self):
'''Cannot complete a job immediately after retry'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('retry', 0, 'jid', 'queue', 'worker', 0)
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'complete', 0, 'jid', 'worker', 'queue', {})
def test_retry_fail(self):
'''Cannot fail a job immediately after retry'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('retry', 0, 'jid', 'queue', 'worker', 0)
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'fail', 0, 'jid', 'worker', 'group', 'message', {})
def test_retry_heartbeat(self):
'''Cannot heartbeat a job immediately after retry'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('retry', 0, 'jid', 'queue', 'worker', 0)
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
def test_retry_nonexistent(self):
'''It's an error to retry a nonexistent job'''
self.assertRaisesRegexp(redis.ResponseError, r'does not exist',
self.lua, 'retry', 0, 'jid', 'queue', 'another', 0)
def test_retry_group_message(self):
'''Can provide a group/message to be used for retries'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua(
'retry', 0, 'jid', 'queue', 'worker', 0, 'group', 'message')
self.assertEqual(self.lua('get', 0, 'jid'), {'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 0,
'failure': {'group': 'group',
'message': 'message',
'when': 0,
'worker': 'worker'},
'history': [{'q': 'queue', 'what': 'put', 'when': 0},
{'what': 'popped', 'when': 0, 'worker': 'worker'},
{'group': 'group', 'what': 'failed', 'when': 0}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'queue',
'remaining': -1,
'retries': 0,
'state': 'failed',
'tags': {},
'tracked': False,
'throttles': ['ql:q:queue'],
'worker': u'',
'spawned_from_jid': False})
def test_retry_delay(self):
'''Can retry a job with a delay and then it's considered scheduled'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua(
'retry', 0, 'jid', 'queue', 'worker', 10)
# Now it should be considered scheduled
self.assertEqual(self.lua('pop', 0, 'queue', 'worker', 10), {})
self.assertEqual(self.lua('get', 0, 'jid')['state'], 'scheduled')
def test_retry_wrong_queue(self):
'''Cannot retry a job in the wrong queue'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua('retry', 0, 'jid', 'queue', 'worker', 0)
self.assertRaisesRegexp(redis.ResponseError, r'not currently running',
self.lua, 'heartbeat', 0, 'jid', 'worker', {})
def test_retry_failed_retries(self):
'''Retry can be invoked enough to cause it to fail retries'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 0)
self.lua('pop', 0, 'queue', 'worker', 10)
self.lua(
'retry', 0, 'jid', 'queue', 'worker', 0)
self.assertEqual(self.lua('get', 0, 'jid'), {
'data': '{}',
'dependencies': {},
'dependents': {},
'expires': 0,
'failure': {
'group': 'failed-retries-queue',
'message': 'Job exhausted retries in queue "queue"',
'when': 0,
'worker': u''},
'history': [
{'q': 'queue', 'what': 'put', 'when': 0},
{'what': 'popped', 'when': 0, 'worker': 'worker'},
{'group': 'failed-retries-queue', 'what': 'failed', 'when': 0}],
'jid': 'jid',
'klass': 'klass',
'priority': 0,
'queue': 'queue',
'remaining': -1,
'retries': 0,
'state': 'failed',
'tags': {},
'tracked': False,
'throttles': ['ql:q:queue'],
'worker': u'',
'spawned_from_jid': False
})
class TestGracePeriod(TestQless):
'''Make sure the grace period is honored'''
# Our grace period for the tests
grace = 10
def setUp(self):
TestQless.setUp(self)
        # Ensure we know what the grace period is
self.lua('config.set', 0, 'grace-period', self.grace)
def test_basic(self):
'''The lock must expire, and then the grace period must pass'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
job = self.lua('pop', 1, 'queue', 'worker', 10)[0]
# Now, we'll lose the lock, but we should only get a warning, and not
# actually have the job handed off to another yet
expires = job['expires'] + 10
self.assertEqual(self.lua('pop', expires, 'queue', 'another', 10), {})
# However, once the grace period passes, we should be fine
self.assertNotEqual(
self.lua('pop', expires + self.grace, 'queue', 'another', 10), {})
def test_repeated(self):
'''Grace periods should be given for each lock lost, not just first'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0, 'retries', 20)
job = self.lua('pop', 0, 'queue', 'worker', 10)[0]
for _ in xrange(10):
# Now, we'll lose the lock, but we should only get a warning, and
# not actually have the job handed off to another yet
expires = job['expires'] + 10
self.assertEqual(
self.lua('pop', expires, 'queue', 'worker', 10), {})
# However, once the grace period passes, we should be fine
job = self.lua(
'pop', expires + self.grace, 'queue', 'worker', 10)[0]
def test_fail(self):
'''Can still fail a job during the grace period'''
self.lua('put', 0, 'worker', 'queue', 'jid', 'klass', {}, 0)
job = self.lua('pop', 0, 'queue', 'worker', 10)[0]
# Lose the lock and fail the job
expires = job['expires'] + 10
self.lua('pop', expires, 'queue', 'worker', 10)
self.lua('fail', expires, 'jid', 'worker', 'foo', 'bar', {})
# And make sure that no job is available after the grace period
self.assertEqual(
self.lua('pop', expires + self.grace, 'queue', 'worker', 10), {})
|
backupify/qless-core
|
test/test_locks.py
|
Python
|
mit
| 17,210
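For orientation, a hypothetical client-side view of the lock lifecycle these tests exercise. The Python binding names here (qless.Client, queues, put/pop/heartbeat/complete) are assumptions based on the qless README, not verified against this repository:

import qless

client = qless.Client('redis://localhost:6379')     # assumed constructor form
queue = client.queues['queue']
queue.put('mymodule.MyJobClass', {'key': 'value'})  # enqueue a job
job = queue.pop()      # worker acquires the job's lock
job.heartbeat()        # extend the lock before it expires
job.complete()         # release the lock and finish the job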
|
# Copyright 2014 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import dns
import dns.rdataclass
import dns.rdatatype
import dns.exception
import dns.flags
import dns.rcode
import dns.message
import dns.opcode
from oslo_config import cfg
from oslo_log import log as logging
from designate.i18n import _LI
from designate.i18n import _LW
from designate.backend import base
from designate import exceptions
from designate.mdns import rpcapi as mdns_api
from designate import objects
dns_query = eventlet.import_patched('dns.query')
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class AgentPoolBackend(base.Backend):
__plugin_name__ = 'agent'
__backend_status__ = 'untested'
def __init__(self, target):
super(AgentPoolBackend, self).__init__(target)
self.host = self.options.get('host')
self.port = int(self.options.get('port'))
self.timeout = CONF['service:pool_manager'].poll_timeout
self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
self.max_retries = CONF['service:pool_manager'].poll_max_retries
@property
def mdns_api(self):
return mdns_api.MdnsAPI.get_instance()
def create_domain(self, context, domain):
LOG.debug('Create Domain')
response, retry = self._make_and_send_dns_message(domain.name, 15, 14,
65282, 65280, self.host, self.port)
if response is None:
raise exceptions.Backend()
def update_domain(self, context, domain):
LOG.debug('Update Domain')
values = {
'host': self.host,
'port': self.port,
'pool_id': CONF['service:central'].default_pool_id
}
nameserver = objects.PoolNameserver(**values)
self.mdns_api.notify_zone_changed(
context, domain, nameserver, self.timeout,
self.retry_interval, self.max_retries, 0)
def delete_domain(self, context, domain):
LOG.debug('Delete Domain')
response, retry = self._make_and_send_dns_message(domain.name, 15, 14,
65283, 65280, self.host, self.port)
if response is None:
raise exceptions.Backend()
def _make_and_send_dns_message(self, domain_name, timeout, opcode,
rdatatype, rdclass, dest_ip,
dest_port):
dns_message = self._make_dns_message(domain_name, opcode, rdatatype,
rdclass)
retry = 0
response = None
LOG.info(_LI("Sending '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'.") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'port': dest_port})
response = self._send_dns_message(
dns_message, dest_ip, dest_port, timeout)
if isinstance(response, dns.exception.Timeout):
LOG.warn(_LW("Got Timeout while trying to send '%(msg)s' for "
"'%(zone)s' to '%(server)s:%(port)d'. Timeout="
"'%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
elif isinstance(response, dns_query.BadResponse):
LOG.warn(_LW("Got BadResponse while trying to send '%(msg)s' "
"for '%(zone)s' to '%(server)s:%(port)d'. Timeout"
"='%(timeout)d' seconds. Retry='%(retry)d'") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'port': dest_port, 'timeout': timeout,
'retry': retry})
response = None
return (response, retry)
        # Check that we actually got a NOERROR in the rcode and an
# authoritative answer
elif not (response.flags & dns.flags.AA) or dns.rcode.from_flags(
response.flags, response.ednsflags) != dns.rcode.NOERROR:
LOG.warn(_LW("Failed to get expected response while trying to "
"send '%(msg)s' for '%(zone)s' to '%(server)s:"
"%(port)d'. Response message: %(resp)s") %
{'msg': str(opcode),
'zone': domain_name, 'server': dest_ip,
'port': dest_port, 'resp': str(response)})
response = None
return (response, retry)
else:
return (response, retry)
return (response, retry)
def _make_dns_message(self, zone_name, opcode, rdatatype, rdclass):
dns_message = dns.message.make_query(zone_name, rdatatype,
rdclass=rdclass)
dns_message.flags = 0
dns_message.set_opcode(opcode)
dns_message.flags |= dns.flags.AA
return dns_message
def _send_dns_message(self, dns_message, dest_ip, dest_port, timeout):
try:
if not CONF['service:mdns'].all_tcp:
response = dns_query.udp(
dns_message, dest_ip, port=dest_port, timeout=timeout)
else:
response = dns_query.tcp(
dns_message, dest_ip, port=dest_port, timeout=timeout)
return response
except dns.exception.Timeout as timeout:
return timeout
except dns_query.BadResponse as badResponse:
return badResponse
|
kiall/designate-py3
|
designate/backend/agent.py
|
Python
|
apache-2.0
| 6,245
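The _make_dns_message/_send_dns_message pair is standard dnspython usage. A minimal standalone sketch of crafting an authoritative message with a forced opcode and sending it over UDP; the zone name, address, and port are placeholders:

import dns.exception
import dns.flags
import dns.message
import dns.opcode
import dns.query
import dns.rdatatype

msg = dns.message.make_query('example.com.', dns.rdatatype.SOA)
msg.flags = 0                      # clear the default RD flag
msg.set_opcode(dns.opcode.NOTIFY)  # force the opcode
msg.flags |= dns.flags.AA          # mark the message authoritative
try:
    response = dns.query.udp(msg, '192.0.2.1', port=53, timeout=5)
    print(bool(response.flags & dns.flags.AA))
except dns.exception.Timeout:
    response = None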
|
#
# x86db.py
#
# Copyright (C) 2009 Gil Dabah, http://ragestorm.net/disops/
#
from x86header import *
# The mandatory prefix bytes list:
_MandatoryPrefixesList = [0x9b, 0x66, 0xf3, 0xf2]
# Map the mandatory prefix to its corresponding entry number in the PREFIXED table.
# Note that no-prefix is first entry.
_MandatoryPrefixToEntry = {0x9b: 1, 0x66: 1, 0xf3: 2, 0xf2: 3}
#
# Prefixed Table (12 entries):
#
# Normal optional prefix:
# 0 - NONE, 1 - 66/9b, 2 - f3, 3 - f2
#
# With VEX prefix, VVVV is used:
# 4 - NONE, 5 - 66/9b, 6 - f3, 7 - f2
#
# With VEX prefix, VVVV is not used:
# 8 - NONE, 9 - 66/9b, 10 - f3, 11 - f2
#
# Note: VEX.L and VEX.W effects can be described in a single InstInfo structure and thus we don't need to split them either.
# The problem with instructions that are encodable with or without VEX.VVVV
# is that a single InstInfo structure cannot hold more than one set of operands.
class DBException(Exception):
""" Used in order to throw an exception when an error occurrs in the DB. """
pass
class InstructionInfo:
""" Instruction Info holds all information relevant for an instruction.
        another string member, self.tag, will be initialized at runtime to hold the bytes of the opcode (e.g.: 0f_0f_a7). """
def __init__(self, classType, OL, pos, isModRMIncluded, mnemonics, operands, flags):
self.classType = classType
# Check for special mandatory-prefixed instruction.
if pos[0] in _MandatoryPrefixesList:
self.pos = pos[1:]
self.prefix = pos[0]
self.OL = OpcodeLength.NextOL[OL]
self.prefixed = True
self.entryNo = _MandatoryPrefixToEntry[pos[0]]
else:
self.pos = pos
self.prefix = 0
self.OL = OL
self.prefixed = False
self.entryNo = 0
self.mnemonics = mnemonics
self.operands = operands
self.flags = flags
if len(self.operands) == 3:
self.flags |= InstFlag.USE_OP3
elif len(self.operands) == 4:
self.flags |= InstFlag.USE_OP3 | InstFlag.USE_OP4
if isModRMIncluded:
self.flags |= InstFlag.MODRM_INCLUDED
self.VEXtag = ""
# Special treatment for VEX instructions:
if self.flags & InstFlag.PRE_VEX:
# If one of the operands uses VEX.VVVV, change the entryNo according to table described above.
if len(filter(lambda x: x in [OperandType.VXMM, OperandType.VYMM, OperandType.VYXMM], self.operands)) >= 1:
self.entryNo += 4 # With VEX.VVVV
self.VEXtag = "V"
else:
self.entryNo += 8 # Without VEX.VVVV
self.VEXtag = "NV"
# Assert that OL is < OL_33:
if self.OL >= OpcodeLength.OL_33:
raise DBException("Instruction OL is bigger than OL_33.")
class InstructionsTable:
""" A table contains all instructions under its index. The number of instructions varyies and depends on its type.
Note that a table be nested in itself.
Every table has its position beginning in the db.root.
So all opcodes that begin with first byte with the value of 0x0f, will be in the 0x0f table (which has a tag "0f"). """
Full = 256
Divided = 72
Group = 8
Prefixed = 12
def __init__(self, size, tag, pos):
self.list = {}
self.size = size
# The type of the table is determined by its actual size.
# Set up a limit number for the table, used in the iterator.
if size == self.Full:
self.type = NodeType.LIST_FULL
self.limit = self.Full
elif size == self.Divided:
# Since the ranges of Divided instructions are 0x0-0x7 and 0xc0-0xff, we limit it as a full sized table.
self.type = NodeType.LIST_DIVIDED
self.limit = self.Full
elif size == self.Group:
self.type = NodeType.LIST_GROUP
self.limit = size
elif size == self.Prefixed:
self.type = NodeType.LIST_PREFIXED
self.limit = size
self.tag = tag
self.pos = pos
def __iter__(self):
""" This is the "ctor" of the iterator. """
# Begin with the first opcode.
self.__iterIndex = -1
return self
def next(self):
""" This is the core of the iterator, return the next instruction or halt. """
# Get next instruction.
self.__iterIndex += 1
# Optimization: skip unused entries in Divided tables.
if self.type == NodeType.LIST_DIVIDED and self.__iterIndex == 8:
# Actually it must be done, because the iterator-user assumes Divided table is 72 entries long.
# So it happens that len(list(divided-table)) == 72 !
self.__iterIndex = 0xc0
# Stop the iteration in case we reached the limit.
if self.__iterIndex == self.limit:
raise StopIteration
# If we have the key return its corresponding opcode,
# it might be that we return an object of another nested InstructionTable as well.
if self.list.has_key(self.__iterIndex):
item = self.list[self.__iterIndex]
return item
# In case no InstructionInfo or InstructionsTable were found, return None (this doesn't stop the iteration!).
return None
class GenBlock:
""" There are some special instructions which have the operand encoded in the code byte itself.
        For instance: 40: INC EAX, 41: INC ECX. push/pop/dec, etc...
        Therefore, these instructions can be treated specially in the tables: instead of generating a unique instruction-info per such instruction,
        we "cheat" by making some entries in the table point to the same instruction-info.
Following the last example, all instructions in the range of 0x40-0x47 point to the instruction-info 0x40, which means INC <REG-FROM-SAME-BYTE>.
This means that we don't call SetInstruction for the range 0x40-0x47, only a single set instruction per this block
(8 instructions which their REG field is extracted from their own byte code).
So in order to simulate the real case where there are actually 8 instructions that were set using SetInstruction,
this class handles this special flag and returns the same first instruction for its corresponding block at runtime. """
# Number of generated instructions in a row.
Block = 8
def __init__(self, list):
if isinstance(list, InstructionsTable) == False:
raise DBException("List must be InstructionsTable object")
self.list = list
def __iter__(self):
""" This is the "ctor" of the iterator. """
# Count up to Block instructions.
self.counter = 0
# This is the item we're going to return every iteration.
self.item = None
# Start the InstructionsTable internal iterator as well.
self.list.__iter__()
return self
def next(self):
# Get next item from internal iterator.
i = self.list.next()
# If there's an item set, it means we hit the special opcode before.
if self.item != None:
# Countup
self.counter += 1
# If we reached the limit, stop.
if self.counter == self.Block:
self.counter = 0
self.item = None
# See if the item we read is an opcode and whether it has the GEN_BLOCK.
if isinstance(i, InstructionInfo) and i.flags & InstFlag.GEN_BLOCK:
# Store this instruction for next 8 iterations.
self.item = i
return i
elif i == None and self.item != None:
# Return the stored item, only if there's no opcode set.
# Sometimes, there are GEN_BLOCK instructions and the next instruction is also set (means it has its own OpcodeInfo)
# so we have to return real instructions first, and then generated instructions.
return self.item
# Return the instruction we read from the real list.
return i
class InstructionsDB:
""" The Instructions Data Base holds all instructions under it.
The self.root is where all instructions begin, so instructions that are 1 byte long, will be set directly there.
But instructions that are 2 instructions long, will be set under another InstructionsTable nested inside the self.root.
The DB is actually the root of a Trie. (For more info about Trie see diStorm's instructions.h). """
def __init__(self):
# Root contains a Full InstructionsTable with 256 entries.
self.root = InstructionsTable(InstructionsTable.Full, "", [])
def HandleMandatoryPrefix(self, type, o, pos, ii, tag):
if ii.prefixed:
ii.tag = "_%02X%s" % (ii.prefix, ii.tag)
if ii.flags & InstFlag.PRE_VEX:
ii.tag = "_%s%s" % (ii.VEXtag, ii.tag)
# If there is nothing at this index, create a prefixed table.
if o.list.has_key(pos[0]) == False:
o.list[pos[0]] = InstructionsTable(InstructionsTable.Prefixed, tag, "")
        # If there's a table constructed already (doesn't matter if by the last line).
if isinstance(o.list[pos[0]], InstructionsTable) and o.list[pos[0]].type == NodeType.LIST_PREFIXED:
# Check for obvious collision.
if o.list[pos[0]].list.has_key(ii.entryNo):
raise DBException("Collision in prefix table.")
# Link the instruction to its index.
o.list[pos[0]].list[ii.entryNo] = ii
# The index is taken and it's not a prefixed table, we will have to convert it then.
else:
# Store current instruction which we are going to replace with a table.
tmp = o.list[pos[0]]
# Check for collision first, if it's a prefixed instruction at all.
if (not ii.prefixed and ii.pos[0] != 0x0f) or (tmp.entryNo == ii.entryNo):
msg = "Instruction Collision: %s" % str(o.list[pos[0]])
raise DBException(msg)
# Create a prefixed table.
o.list[pos[0]] = InstructionsTable(InstructionsTable.Prefixed, tag, "")
# Link the previous instruction.
o.list[pos[0]].list[tmp.entryNo] = tmp
# Link new instruction.
o.list[pos[0]].list[ii.entryNo] = ii
def CreateSet(self, type, o, pos, ii, tag = "", level = 0):
""" This is the most improtant function in the whole project.
It builds and links a new InstructionsTable if required and
afterwards sets the given InstructionInfo object in its correct place.
It knows to generate the nested lists dynamically, building a Trie DB.
The algorithm for building the nested tables is as follows:
See if you got to the last byte code of the instruction, if so, link the instruction info and exit.
Try to enter the first index in the list, if it doesn't exist, create it.
If it exists, take off the first index from its array, (since we already entered it), and RECURSE with the new(/existing) list now.
In practice it's a bit more complex since there are 3 types of tables we can create, and we have to take care of it.
Let's see two examples of how it really works with the following input (assuming root is empty):
0: OL_3, root, [0x67, 0x69, 0x6c], II_INST
1: Create Table - with size of 256 at index 0x67
Recurse - OL_2, root[0x67], [0x69, 0x6c], II_INST
2: Create Table - with size of 256 at index 0x69
Recurse - OL_1, root[0x67][0x69], [0x6c], II_INST
3: Link Instruction Information - at index 0x6c, since type is OL_1
root[0x67][0x69][0x6c] = II_INST
exit
Second example:
0: OL_23, root, [0x0f, 0xb0, 0x03], II_INST2
1: Create Table - with size of 256 at index 0x0f
Recurse - OL_13, root[0x0f], [0xb0, 0x03], II_INST2
2: Create Table - with size of 8(GROUP) at index 0xb0, since input type is OL_13
Recurse - OL_1, root[0x0f][0xb0], [0x03], II_INST2
3: Link Instruction Information - at index 0x03, since type is OL_1
root[0x0f][0xb0][0x03] = II_INST2
exit
Every table we create is usually a Full sized table (256 entries), since it can point to next 256 instructions.
If the input type is OL_13 or OL_1d we know we have to create a Group sized table or Divided sized table, correspondingly.
OL_13/OL_1d means its the last table to build in the sequence of byte codes of the given instruction.
OL_1 always means that we just have to link the instruction information and that all tables are built already.
Therefore the "next" of OL_13/OL_1d is always OL_1.
Special case for mandatory prefixed instructions:
If the instruction's first opcode byte is a mandatory prefix (0x66, 0xf2, 0xf3), then we will skip it in the root.
However, it will be set in the same table of that instruction without the prefix byte.
Therefore if there are a few instructions that the only difference among them is the mandatory prefix byte,
they will share a special table. This "PREFIXED" table points to the Instruction Information of those possible instructions.
Also the information for the same instruction without any mandatory prefix will be stored in this table.
Entries order: None, 0x66, 0xf2, 0xf3.
Example: [0x0f, 0x2a], ["CVTPI2PS"]
[0x66, 0x0f, 0x2a], ["CVTPI2PD"]
[0xf3, 0x0f, 0x2a], ["CVTSI2SS"]
When there is a collision with the same instruction, we will know to change it into a PREFIXED table.
"""
# Keep track of the index we scan.
tag += "_%02X" % pos[0]
# If the type is OL_1, it means we have to set the InstructionInfo in o.list, which is the last table we need.
if type == OpcodeLength.OL_1:
# Set the tag (position formatted in text) of the Instruction itself.
ii.tag = tag
# If the instruction is already set, there's something wrong with the DB initializer,
# probably a collision inside x86sets.py.
if ii.prefixed:
self.HandleMandatoryPrefix(type, o, pos, ii, tag)
return
if o.list.has_key(pos[0]) == True:
self.HandleMandatoryPrefix(type, o, pos, ii, tag)
return
# Link the instruction info in its place.
o.list[pos[0]] = ii
# Stop recursion.
return
# See whether we have to create a nested table.
if o.list.has_key(pos[0]) == False:
# All tables are full sized.
tableType = InstructionsTable.Full
if type == OpcodeLength.OL_13:
# Except 1.3 - Create a Group table.
tableType = InstructionsTable.Group
elif type == OpcodeLength.OL_1d:
# And except 1.d - Create a Divided table.
tableType = InstructionsTable.Divided
# Create and link the new table at the same time in its place.
o.list[pos[0]] = InstructionsTable(tableType, tag, ii.pos[:-1])
# This is the tricky recursive call,
# 1) Get the next OL we need, so we know what to do next, place an instruction info, or create another table.
# 2) Return the current table, which must be created, because it's either was just created or exists from before.
# 3) Since pos is a list with the indexes of the instruction, and we just used pos[0], we move to the next indexes,
# by removing the first item.
# 4) The instruction info to set when we reach its place.
# 5) The updated tag with the indexes of the instruction.
self.CreateSet(OpcodeLength.NextOL[type], o.list[pos[0]], pos[1:], ii, tag, level + 1)
def SetInstruction(self, *args):
""" This function is used in order to insert an instruction info into the DB. """
# *args = ISetClass, OL, pos, mnemonics, operands, flags
# Construct an Instruction Info object with the info given in args.
opcode = args[1].replace(" ", "").split(",")
# The number of bytes is the base length, now we need to check the last entry.
pos = [int(i[:2], 16) for i in opcode]
last = opcode[-1][2:] # Skip hex of last full byte
isModRMIncluded = False # Indicates whether 3 bits of the REG field in the ModRM byte were used.
if last[:2] == "//": # Divided Instruction
pos.append(int(last[2:], 16))
isModRMIncluded = True
try:
OL = {1:OpcodeLength.OL_1d, 2:OpcodeLength.OL_2d}[len(opcode)]
except KeyError:
raise DBException("Invalid divided instruction opcode")
elif last[:1] == "/": # Group Instruction
isModRMIncluded = True
pos.append(int(last[1:], 16))
try:
OL = {1:OpcodeLength.OL_13, 2:OpcodeLength.OL_23, 3:OpcodeLength.OL_33}[len(opcode)]
except KeyError:
raise DBException("Invalid group instruction opcode")
elif len(last) != 0:
raise DBException("Invalid last byte in opcode")
# Normal full bytes instruction
else:
try:
OL = {1:OpcodeLength.OL_1, 2:OpcodeLength.OL_2, 3:OpcodeLength.OL_3, 4:OpcodeLength.OL_4}[len(opcode)]
except KeyError:
raise DBException("Invalid normal instruction opcode")
ii = InstructionInfo(args[0], OL, pos, isModRMIncluded, args[2], args[3], args[4])
# Insert the instruction into the table, take care of nested tables, etc...
self.CreateSet(ii.OL, self.root, ii.pos, ii)
def GenerateTables(self, filter):
""" GenerateTables is a generator function that iterates over an InstructionsTable,
it returns all nested tables in the DB.
The tables are returned in BFS order!
If you pass a filter, that filter will be called for every table and
should return True for letting the generator return it. """
# Start with the root, if no list was specified.
list = self.root
list.tag = "ROOT"
# Return the root first.
stack = [list]
while len(stack) > 0:
list = stack.pop(0)
yield list
for i in list:
if isinstance(i, InstructionsTable):
if filter is not None:
# If a filter is set, call it with the table.
if filter(i):
# If the return value was True, return this same InstructionInfo.
stack.append(i)
else:
# If no filter was set, just push this table.
stack.append(i)
|
LiamKarlMitchell/InfiniteSky
|
TSX_Client/diStorm/disOps/x86db.py
|
Python
|
gpl-3.0
| 16,671
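Stripped of the Group/Divided/Prefixed table variants, CreateSet is a byte-trie insertion. A hypothetical minimal sketch of the same recursion using plain dicts (the instruction-info payloads are placeholder strings):

def trie_set(root, opcode_bytes, info):
    # Walk all but the last opcode byte, creating nested tables on demand,
    # then link the instruction info at the leaf (cf. the OL_1 case above).
    node = root
    for b in opcode_bytes[:-1]:
        node = node.setdefault(b, {})
    node[opcode_bytes[-1]] = info

root = {}
trie_set(root, [0x0f, 0x0f, 0xa7], 'II_PLACEHOLDER')
trie_set(root, [0x90], 'II_NOP')
print(root[0x0f][0x0f][0xa7])  # -> II_PLACEHOLDER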
|
# -*- coding: utf-8 -*-
# File : unittest.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.
import unittest
import numpy as np
from torch.autograd import Variable
def as_numpy(v):
if isinstance(v, Variable):
v = v.data
return v.cpu().numpy()
class TorchTestCase(unittest.TestCase):
def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
npa, npb = as_numpy(a), as_numpy(b)
self.assertTrue(
            np.allclose(npa, npb, atol=atol, rtol=rtol),
'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
)
|
aditiiyer/CERR
|
CERR_core/ModelImplementationLibrary/SegmentationModels/ModelDependencies/CT_Ventricles_DeepLab/modeling/sync_batchnorm/unittest.py
|
Python
|
lgpl-2.1
| 834
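A small, hypothetical usage sketch of the helper above; the import path is illustrative, and PyTorch is assumed to be installed:

import unittest
import torch
from sync_batchnorm.unittest import TorchTestCase  # assumed import path

class MyTest(TorchTestCase):
    def test_close(self):
        a = torch.ones(3)
        b = torch.ones(3) + 1e-4   # within the default atol of 1e-3
        self.assertTensorClose(a, b)

if __name__ == '__main__':
    unittest.main()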
|
#!/usr/bin/env python3
# OpenCL built-in library: type conversion functions
#
# Copyright (c) 2013 Victor Oliveira <victormatheus@gmail.com>
# Copyright (c) 2013 Jesse Towner <jessetowner@lavabit.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This script generates the file convert_type.cl, which contains all of the
# OpenCL functions in the form:
#
# convert_<destTypen><_sat><_roundingMode>(<sourceTypen>)
types = ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong', 'float', 'double']
int_types = ['char', 'uchar', 'short', 'ushort', 'int', 'uint', 'long', 'ulong']
unsigned_types = ['uchar', 'ushort', 'uint', 'ulong']
float_types = ['float', 'double']
int64_types = ['long', 'ulong']
float64_types = ['double']
vector_sizes = ['', '2', '3', '4', '8', '16']
half_sizes = [('2',''), ('4','2'), ('8','4'), ('16','8')]
saturation = ['','_sat']
rounding_modes = ['_rtz','_rte','_rtp','_rtn']
float_prefix = {'float':'FLT_', 'double':'DBL_'}
float_suffix = {'float':'f', 'double':''}
bool_type = {'char' : 'char',
'uchar' : 'char',
'short' : 'short',
'ushort': 'short',
'int' : 'int',
'uint' : 'int',
'long' : 'long',
'ulong' : 'long',
'float' : 'int',
'double' : 'long'}
unsigned_type = {'char' : 'uchar',
'uchar' : 'uchar',
'short' : 'ushort',
'ushort': 'ushort',
'int' : 'uint',
'uint' : 'uint',
'long' : 'ulong',
'ulong' : 'ulong'}
sizeof_type = {'char' : 1, 'uchar' : 1,
'short' : 2, 'ushort' : 2,
'int' : 4, 'uint' : 4,
'long' : 8, 'ulong' : 8,
'float' : 4, 'double' : 8}
limit_max = {'char' : 'CHAR_MAX',
'uchar' : 'UCHAR_MAX',
'short' : 'SHRT_MAX',
'ushort': 'USHRT_MAX',
'int' : 'INT_MAX',
'uint' : 'UINT_MAX',
'long' : 'LONG_MAX',
'ulong' : 'ULONG_MAX'}
limit_min = {'char' : 'CHAR_MIN',
'uchar' : '0',
'short' : 'SHRT_MIN',
'ushort': '0',
'int' : 'INT_MIN',
'uint' : '0',
'long' : 'LONG_MIN',
'ulong' : '0'}
def conditional_guard(src, dst):
int64_count = 0
float64_count = 0
if src in int64_types:
int64_count = int64_count +1
elif src in float64_types:
float64_count = float64_count + 1
if dst in int64_types:
int64_count = int64_count +1
elif dst in float64_types:
float64_count = float64_count + 1
if float64_count > 0 and int64_count > 0:
print("#if defined(cl_khr_fp64) && defined(cles_khr_int64)")
return True
elif float64_count > 0:
print("#ifdef cl_khr_fp64")
return True
elif int64_count > 0:
print("#ifdef cles_khr_int64")
return True
return False
print("""/* !!!! AUTOGENERATED FILE generated by convert_type.py !!!!!
DON'T CHANGE THIS FILE. MAKE YOUR CHANGES TO convert_type.py AND RUN:
$ ./generate-conversion-type-cl.sh
OpenCL type conversion functions
Copyright (c) 2013 Victor Oliveira <victormatheus@gmail.com>
Copyright (c) 2013 Jesse Towner <jessetowner@lavabit.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
""")
#
# Default Conversions
#
# All conversions are in accordance with the OpenCL specification,
# which cites the C99 conversion rules.
#
# Casting from floating point to integer results in conversions
# with truncation, so it should be suitable for the default convert
# functions.
#
# Conversions from integer to floating-point, and floating-point to
# floating-point through casting is done with the default rounding
# mode. While C99 allows dynamically changing the rounding mode
# during runtime, it is not a supported feature in OpenCL according
# to Section 7.1 - Rounding Modes in the OpenCL 1.2 specification.
#
# Therefore, we can assume for optimization purposes that the
# rounding mode is fixed to round-to-nearest-even. Platform target
# authors should ensure that the rounding-control registers remain
# in this state, and that this invariant holds.
#
# Also note, even though the OpenCL specification isn't entirely
# clear on this matter, we implement all rounding mode combinations
# even for integer-to-integer conversions. When such a conversion
# is used, the rounding mode is ignored.
#
def generate_default_conversion(src, dst, mode):
close_conditional = conditional_guard(src, dst)
# scalar conversions
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST} convert_{DST}{M}({SRC} x)
{{
return ({DST})x;
}}
""".format(SRC=src, DST=dst, M=mode))
# vector conversions, done through decomposition to components
for size, half_size in half_sizes:
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST}{N} convert_{DST}{N}{M}({SRC}{N} x)
{{
return ({DST}{N})(convert_{DST}{H}(x.lo), convert_{DST}{H}(x.hi));
}}
""".format(SRC=src, DST=dst, N=size, H=half_size, M=mode))
# 3-component vector conversions
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST}3 convert_{DST}3{M}({SRC}3 x)
{{
return ({DST}3)(convert_{DST}2(x.s01), convert_{DST}(x.s2));
}}""".format(SRC=src, DST=dst, M=mode))
if close_conditional:
print("#endif")
print()
for src in types:
for dst in types:
generate_default_conversion(src, dst, '')
for src in int_types:
for dst in int_types:
for mode in rounding_modes:
generate_default_conversion(src, dst, mode)
#
# Saturated Conversions To Integers
#
# These functions are dependent on the unsaturated conversion functions
# generated above, and use clamp, max, min, and select to eliminate
# branching and vectorize the conversions.
#
# Again, as above, we allow all rounding modes for integer-to-integer
# conversions with saturation.
#
def generate_saturated_conversion(src, dst, size):
# Header
print()
close_conditional = conditional_guard(src, dst)
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST}{N} convert_{DST}{N}_sat({SRC}{N} x)
{{""".format(DST=dst, SRC=src, N=size))
# FIXME: This is a work around for lack of select function with
# signed third argument when the first two arguments are unsigned types.
# We cast to the signed type for sign-extension, then do a bitcast to
# the unsigned type.
  if dst in unsigned_types:
    bool_prefix = "as_{DST}{N}(convert_{BOOL}{N}".format(DST=dst, BOOL=bool_type[dst], N=size)
    bool_suffix = ")"
  else:
    bool_prefix = "convert_{BOOL}{N}".format(BOOL=bool_type[dst], N=size)
    bool_suffix = ""
# Body
if src == dst:
# Conversion between same types
print(" return x;")
elif src in float_types:
# Conversion from float to int
print(""" {DST}{N} y = convert_{DST}{N}(x);
y = select(y, ({DST}{N}){DST_MIN}, {BP}(x < ({SRC}{N}){DST_MIN}){BS});
y = select(y, ({DST}{N}){DST_MAX}, {BP}(x > ({SRC}{N}){DST_MAX}){BS});
return y;""".format(SRC=src, DST=dst, N=size,
DST_MIN=limit_min[dst], DST_MAX=limit_max[dst],
BP=bool_prefix, BS=bool_suffix))
else:
    # Integer to integer conversion with sizeof(src) == sizeof(dst)
if sizeof_type[src] == sizeof_type[dst]:
if src in unsigned_types:
print(" x = min(x, ({SRC}){DST_MAX});".format(SRC=src, DST_MAX=limit_max[dst]))
else:
print(" x = max(x, ({SRC})0);".format(SRC=src))
# Integer to integer conversion where sizeof(src) > sizeof(dst)
elif sizeof_type[src] > sizeof_type[dst]:
if src in unsigned_types:
print(" x = min(x, ({SRC}){DST_MAX});".format(SRC=src, DST_MAX=limit_max[dst]))
else:
print(" x = clamp(x, ({SRC}){DST_MIN}, ({SRC}){DST_MAX});"
.format(SRC=src, DST_MIN=limit_min[dst], DST_MAX=limit_max[dst]))
# Integer to integer conversion where sizeof(src) < sizeof(dst)
elif src not in unsigned_types and dst in unsigned_types:
print(" x = max(x, ({SRC})0);".format(SRC=src))
print(" return convert_{DST}{N}(x);".format(DST=dst, N=size))
# Footer
print("}")
if close_conditional:
print("#endif")
for src in types:
for dst in int_types:
for size in vector_sizes:
generate_saturated_conversion(src, dst, size)
def generate_saturated_conversion_with_rounding(src, dst, size, mode):
# Header
print()
close_conditional = conditional_guard(src, dst)
# Body
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST}{N} convert_{DST}{N}_sat{M}({SRC}{N} x)
{{
return convert_{DST}{N}_sat(x);
}}
""".format(DST=dst, SRC=src, N=size, M=mode))
# Footer
if close_conditional:
print("#endif")
for src in int_types:
for dst in int_types:
for size in vector_sizes:
for mode in rounding_modes:
generate_saturated_conversion_with_rounding(src, dst, size, mode)
#
# Conversions To/From Floating-Point With Rounding
#
# Note that we assume as above that casts from floating-point to
# integer are done with truncation, and that the default rounding
# mode is fixed to round-to-nearest-even, as per C99 and OpenCL
# rounding rules.
#
# These functions rely on the use of abs, ceil, fabs, floor,
# nextafter, sign, rint and the above generated conversion functions.
#
# Only conversions to integers can have saturation.
#
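# Sketch of the technique for float destinations: convert with the
# default round-to-nearest-even, round-trip the result back to the
# source type, then nudge the result one ULP with nextafter if the round
# trip overshot for the requested mode. Assuming bool_type['double'] is
# 'long', the generated body of convert_double_rtn(long x) is roughly:
#
#   double r = convert_double(x);
#   long y = convert_long(r);
#   return select(r, nextafter(r, (double)-INFINITY), convert_long(y > x));
#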
def generate_float_conversion(src, dst, size, mode, sat):
# Header
print()
close_conditional = conditional_guard(src, dst)
print("""_CL_ALWAYSINLINE _CL_OVERLOADABLE
{DST}{N} convert_{DST}{N}{S}{M}({SRC}{N} x)
{{""".format(SRC=src, DST=dst, N=size, M=mode, S=sat))
# Perform conversion
if dst in int_types:
    if mode == '_rte':
      print(" x = rint(x);")
    elif mode == '_rtp':
      print(" x = ceil(x);")
    elif mode == '_rtn':
      print(" x = floor(x);")
print(" return convert_{DST}{N}{S}(x);".format(DST=dst, N=size, S=sat))
elif mode == '_rte':
print(" return convert_{DST}{N}(x);".format(DST=dst, N=size))
else:
print(" {DST}{N} r = convert_{DST}{N}(x);".format(DST=dst, N=size))
    print(" {SRC}{N} y = convert_{SRC}{N}(r);".format(SRC=src, N=size))
if mode == '_rtz':
if src in int_types:
print(" {USRC}{N} abs_x = abs(x);".format(USRC=unsigned_type[src], N=size))
print(" {USRC}{N} abs_y = abs(y);".format(USRC=unsigned_type[src], N=size))
else:
print(" {SRC}{N} abs_x = fabs(x);".format(SRC=src, N=size))
print(" {SRC}{N} abs_y = fabs(y);".format(SRC=src, N=size))
print(" return select(r, nextafter(r, sign(r) * ({DST}{N})-INFINITY), convert_{BOOL}{N}(abs_y > abs_x));"
.format(DST=dst, N=size, BOOL=bool_type[dst]))
if mode == '_rtp':
print(" return select(r, nextafter(r, ({DST}{N})INFINITY), convert_{BOOL}{N}(y < x));"
.format(DST=dst, N=size, BOOL=bool_type[dst]))
if mode == '_rtn':
print(" return select(r, nextafter(r, ({DST}{N})-INFINITY), convert_{BOOL}{N}(y > x));"
.format(DST=dst, N=size, BOOL=bool_type[dst]))
# Footer
print("}")
if close_conditional:
print("#endif")
for src in float_types:
for dst in int_types:
for size in vector_sizes:
for mode in rounding_modes:
for sat in saturation:
generate_float_conversion(src, dst, size, mode, sat)
for src in types:
for dst in float_types:
for size in vector_sizes:
for mode in rounding_modes:
generate_float_conversion(src, dst, size, mode, '')
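# Everything above is written to stdout; the assumed usage is to redirect
# the output into the OpenCL C source that gets compiled into the kernel
# library, e.g. `python convert_type.py > convert_type.cl`.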
|
NatTuck/pocl
|
lib/kernel/convert_type.py
|
Python
|
mit
| 13,471
|
# Copyright (C) 2015 Will Bond, Mjumbe Wawatu Ukweli, 2011 by Yehuda Katz
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""A port of the acceptance test for handlebars.js."""
from unittest import TestCase
try:
str_class = unicode
except NameError:
# Python 3 support
str_class = str
import sys
import os
import pybars
from pybars import (
strlist,
Scope,
PybarsError,
Compiler
)
from .test__compiler import render
class TestAcceptance(TestCase):
def assertRender(self, template, context, result, helpers=None, partials=None, error=None, **kwargs):
try:
self.assertEqual(result, render(template, context, helpers=helpers, partials=partials, **kwargs))
        except PybarsError as e:
            self.assertEqual(str(e), error)
else:
if error:
                self.fail("Was expecting an error: {}".format(error))
def test_basic_context(self):
template = u"Goodbye\n{{cruel}}\n{{world}}!"
context = {
'cruel': "cruel",
'world': "world"
}
result = "Goodbye\ncruel\nworld!"
self.assertRender(template, context, result)
def test_comments_ignored(self):
template = u"{{! Goodbye}}Goodbye\n{{cruel}}\n{{world}}!"
context = {
'cruel': "cruel",
'world': "world"
}
result = "Goodbye\ncruel\nworld!"
self.assertRender(template, context, result)
def test_booleans(self):
template = u"{{#goodbye}}GOODBYE {{/goodbye}}cruel {{world}}!"
context = {
'goodbye': True,
'world': 'world'
}
result = "GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': False,
'world': 'world'
}
result = "cruel world!"
self.assertRender(template, context, result)
def test_zeros(self):
template = u"num1: {{num1}}, num2: {{num2}}"
context = {
'num1': 42,
'num2': 0
}
result = "num1: 42, num2: 0"
self.assertRender(template, context, result)
template = u"num: {{.}}"
context = 0
result = "num: 0"
self.assertRender(template, context, result)
template = u"num: {{num1/num2}}"
context = {
'num1': {
'num2': 0
}
}
result = "num: 0"
self.assertRender(template, context, result)
def test_negative_int_literal(self):
helpers = {
'type': lambda s, v: type(v).__name__
}
template = u"{{type \"string\"}} {{type 1}} {{type -1}}"
result = "str int int"
self.assertRender(template, None, result, helpers)
helpers = {
'echo': lambda s, v: str(v)
}
template = u"{{echo \"string\"}} {{echo 1}} {{echo -1}}"
result = "string 1 -1"
self.assertRender(template, None, result, helpers)
def test_newlines(self):
template = u"Alan's\nTest"
result = u"Alan's\nTest"
self.assertRender(template, None, result)
template = u"Alan's\rTest"
result = u"Alan's\rTest"
self.assertRender(template, None, result)
def test_escaping_text(self):
template = u"Awesome's"
result = u"Awesome's"
self.assertRender(template, None, result)
template = u"Awesome\\"
result = u"Awesome\\"
self.assertRender(template, None, result)
template = u"Awesome\\\\ foo"
result = u"Awesome\\\\ foo"
self.assertRender(template, None, result)
template = u"Awesome {{foo}}"
context = {
'foo': '\\'
}
result = u"Awesome \\"
self.assertRender(template, context, result)
template = u' " " '
result = u' " " '
self.assertRender(template, None, result)
def test_escaping_expressions(self):
template = u"{{{awesome}}}"
context = {
'awesome': "&\"\\<>"
}
result = '&\"\\<>'
self.assertRender(template, context, result)
template = u"{{&awesome}}"
context = {
'awesome': "&\"\\<>"
}
result = '&\"\\<>'
self.assertRender(template, context, result)
template = u"{{awesome}}"
context = {
'awesome': "&\"'`\\<>"
}
        result = u'&amp;&quot;&#x27;&#x60;\\&lt;&gt;'
self.assertRender(template, context, result)
def test_functions_returning_safestrings(self):
template = u'{{awesome}}'
result = '&\"\\<>'
# Note that we use strlist for our safestring implementation.
context = {
'awesome': lambda this: strlist([result])
}
self.assertRender(template, context, result)
def test_functions_called(self):
result = 'Awesome'
template = u'{{awesome}}'
context = {
'awesome': lambda this: result
}
self.assertRender(template, context, result)
template = u'{{{awesome}}}'
context = {
'awesome': lambda this: result
}
self.assertRender(template, context, result)
def test_functions_can_take_context_arguments(self):
def awesome(this, context):
return context
template = u"{{awesome frank}}"
context = {
'awesome': awesome,
'frank': 'Frank'
}
result = "Frank"
self.assertRender(template, context, result)
template = u"{{{awesome frank}}}"
context = {
'awesome': awesome,
'frank': 'Frank'
}
result = "Frank"
self.assertRender(template, context, result)
def test_paths_can_contain_hyphens(self):
template = u"{{foo-bar}}"
context = {
"foo-bar": "baz"
}
result = u"baz"
self.assertRender(template, context, result)
def test_nested_paths_access_nested_objects(self):
template = u"Goodbye {{alan/expression}} world!"
context = {
'alan': {
'expression': 'beautiful'
}
}
result = u"Goodbye beautiful world!"
self.assertRender(template, context, result)
def test_nested_paths_to_empty_string_renders(self):
template = u"Goodbye {{alan/expression}} world!"
context = {
'alan': {
'expression': ''
}
}
result = u"Goodbye world!"
self.assertRender(template, context, result)
def test_literal_paths_can_be_used(self):
template = u"Goodbye {{[@alan]/expression}} world!"
context = {
'@alan': {
'expression': 'beautiful'
}
}
result = u"Goodbye beautiful world!"
self.assertRender(template, context, result)
def test_nested_paths(self):
template = u"{{#goodbyes}}{{.././world}} {{/goodbyes}}"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "world world world "
self.assertRender(template, context, result)
def test_current_context_does_not_invoke_helpers(self):
helpers = {'helper': "notcallable"}
template = u"test: {{.}}"
result = "test: "
self.assertRender(template, None, result, helpers)
def test_complex_but_empty_paths(self):
template = u"{{person/name}}"
context = {
'person': {
'name': None
}
}
result = ""
self.assertRender(template, context, result)
template = u"{{person/name}}"
context = {
'person': {}
}
result = ""
self.assertRender(template, context, result)
def test_this_keyword_in_paths_simple(self):
template = u"{{#goodbyes}}{{this}}{{/goodbyes}}"
context = {
'goodbyes': [
"goodbye",
"Goodbye",
"GOODBYE"
]
}
result = "goodbyeGoodbyeGOODBYE"
self.assertRender(template, context, result)
def test_this_keyword_in_paths_complex(self):
template = u"{{#hellos}}{{this/text}}{{/hellos}}"
context = {
'hellos': [
{'text': 'hello'},
{'text': 'Hello'},
{'text': "HELLO"}
]
}
result = "helloHelloHELLO"
self.assertRender(template, context, result)
def test_inverted_sections(self):
# We use this form to not introduce extra whitespace
template = (
u"{{#goodbyes}}{{this}}{{/goodbyes}}"
u"{{^goodbyes}}Right On!{{/goodbyes}}"
)
result = "Right On!"
context = {}
self.assertRender(template, context, result)
context = {'goodbyes': False}
self.assertRender(template, context, result)
context = {'goodbyes': []}
self.assertRender(template, context, result)
def test_inverted_alternate_sections(self):
# We use this form to not introduce extra whitespace
template = (
u"{{#goodbyes}}{{this}}{{else}}Right On!{{/goodbyes}}\n"
u"{{^goodbyes}}Right On!{{else}}{{this}}{{/goodbyes}}"
)
result = "Right On!\nRight On!"
context = {}
self.assertRender(template, context, result)
context = {'goodbyes': False}
self.assertRender(template, context, result)
context = {'goodbyes': []}
self.assertRender(template, context, result)
def test_array_iteration(self):
template = u"{{#goodbyes}}{{text}}! {{/goodbyes}}cruel {{world}}!"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "goodbye! Goodbye! GOODBYE! cruel world!"
self.assertRender(template, context, result)
context = {
'goodbyes': [],
'world': "world"
}
result = "cruel world!"
self.assertRender(template, context, result)
def test_empty_block(self):
template = u"{{#goodbyes}}{{/goodbyes}}cruel {{world}}!"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "cruel world!"
self.assertRender(template, context, result)
context = {
'goodbyes': [],
'world': "world"
}
result = "cruel world!"
self.assertRender(template, context, result)
def test_nested_iteration(self):
# Empty upstream
pass
def test_block_with_complex_lookup(self):
template = u"{{#goodbyes}}{{text}} cruel {{../name}}! {{/goodbyes}}"
context = {
'name': "Alan",
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
]
}
result = "goodbye cruel Alan! Goodbye cruel Alan! GOODBYE cruel Alan! "
self.assertRender(template, context, result)
def test_parent_lookup(self):
template = u"{{#goodbyes}}{{text}} cruel {{@_parent.name}}! {{/goodbyes}}"
context = {
'name': "Alan",
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
]
}
result = "goodbye cruel Alan! Goodbye cruel Alan! GOODBYE cruel Alan! "
self.assertRender(template, context, result)
def test_helper_with_complex_lookup(self):
def link(this, prefix):
return (u"<a href='" + prefix + u"/" +
this.get('url') + u"'>" +
this.get('text') + u"</a>")
helpers = {'link': link}
template = u"{{#goodbyes}}{{{link ../prefix}}}{{/goodbyes}}"
context = {
'prefix': "/root",
'goodbyes': [
{
'text': "Goodbye",
'url': "goodbye"
}
]
}
result = "<a href='/root/goodbye'>Goodbye</a>"
self.assertRender(template, context, result, helpers)
def test_helper_block_with_complex_lookup(self):
def goodbyes(this, options):
result = strlist()
for bye in ["Goodbye", "goodbye", "GOODBYE"]:
result.grow(bye)
result.grow(' ')
result.grow(options['fn'](this))
result.grow("! ")
return result
helpers = {'goodbyes': goodbyes}
template = u"{{#goodbyes}}{{../name}}{{/goodbyes}}"
context = {
'name': "Alan"
}
result = "Goodbye Alan! goodbye Alan! GOODBYE Alan! "
self.assertRender(template, context, result, helpers)
def test_helper_with_complex_lookup_and_nested_template(self):
def link(this, options, prefix):
return u"<a href='" + str_class(prefix) + u"/" + this['url'] + u"'>" + str_class(options['fn'](this)) + u"</a>"
helpers = {'link': link}
template = u"{{#goodbyes}}{{#link ../prefix}}{{text}}{{/link}}{{/goodbyes}}"
context = {
'prefix': '/root',
'goodbyes': [
{
'text': "Goodbye",
'url': "goodbye"
}
]
}
result = u"<a href='/root/goodbye'>Goodbye</a>"
self.assertRender(template, context, result, helpers)
def test_block_helper_with_deep_nested_lookup(self):
def para(this, options, value):
param = Scope(value, this, options['root'])
if value:
return strlist(['<p>']) + options['fn'](param) + strlist(['</p>'])
helpers = {'para': para}
template = (
u"{{#para nested}}Goodbye "
u"{{#if ../world}}cruel {{../world}}{{/if}}{{/para}}"
)
context = {
'world': "world!",
'nested': True
}
result = u"<p>Goodbye cruel world!</p>"
self.assertRender(template, context, result, helpers)
def test_block_with_deep_nested_complex_lookup(self):
template = (
u"{{#outer}}Goodbye "
u"{{#inner}}cruel {{../../omg}}{{/inner}}{{/outer}}"
)
context = {
'omg': "OMG!",
'outer': [
{
'inner': [
{'text': "goodbye"}
]
}
]
}
result = u"Goodbye cruel OMG!"
self.assertRender(template, context, result)
def test_root_lookup(self):
template = (
u"{{#outer}}Goodbye "
u"{{#inner}}cruel {{@root.top}}{{/inner}}{{/outer}}"
)
context = {
'top': "world",
'outer': [
{
'inner': [
{'text': "goodbye"}
]
}
]
}
result = u"Goodbye cruel world"
self.assertRender(template, context, result)
def test_block_helper(self):
def goodbyes(this, options):
return options['fn']({'text': "GOODBYE"})
helpers = {'goodbyes': goodbyes}
template = u"{{#goodbyes}}{{text}}! {{/goodbyes}}cruel {{world}}!"
context = {
'world': "world"
}
result = u"GOODBYE! cruel world!"
self.assertRender(template, context, result, helpers)
def test_block_helper_staying_in_the_same_context(self):
def form(this, options):
return strlist([u"<form>", options['fn'](this), u"</form>"])
helpers = {'form': form}
template = u"{{#form}}<p>{{name}}</p>{{/form}}"
context = {
'name': "Yehuda"
}
result = "<form><p>Yehuda</p></form>"
self.assertRender(template, context, result, helpers)
def test_block_helper_should_have_context_in_this(self):
def link(this, options):
return strlist((
'<a href="/people/', str_class(this['id']), '">',
options['fn'](this),
'</a>'
))
helpers = {'link': link}
template = u"<ul>{{#people}}<li>{{#link}}{{name}}{{/link}}</li>{{/people}}</ul>"
context = {
"people": [
{
"name": "Alan",
"id": 1
},
{
"name": "Yehuda",
"id": 2
}
]
}
result = (
"<ul><li><a href=\"/people/1\">Alan</a></li>"
"<li><a href=\"/people/2\">Yehuda</a></li></ul>"
)
self.assertRender(template, context, result, helpers)
def test_block_helper_for_undefined_value(self):
template = u"{{#empty}}shouldn't render{{/empty}}"
context = {}
result = ''
self.assertRender(template, context, result)
def test_block_helper_passing_a_new_context(self):
def form(this, options, context):
return "<form>" + str_class(options['fn'](context)) + '</form>'
helpers = {'form': form}
template = u"{{#form yehuda}}<p>{{name}}</p>{{/form}}"
context = {
'yehuda': {
'name': "Yehuda"
}
}
result = u"<form><p>Yehuda</p></form>"
self.assertRender(template, context, result, helpers)
def test_block_helper_passing_a_complex_path_context(self):
def form(this, options, context):
return u"<form>" + str_class(options['fn'](context)) + u"</form>"
helpers = {'form': form}
template = u"{{#form yehuda/cat}}<p>{{name}}</p>{{/form}}"
context = {
'yehuda': {
'name': "Yehuda",
'cat': {
'name': "Harold"
}
}
}
result = "<form><p>Harold</p></form>"
self.assertRender(template, context, result, helpers)
def test_subexpression(self):
def para(this, options, values_dict):
return strlist(u'<p>') + options['fn'](values_dict) + strlist(u'</p>')
def fold(this, key, val):
return {key: val}
helpers = {
'para': para,
'fold': fold
}
template = u"{{#para (fold 'foo' val)}}{{foo}}{{/para}}"
context = {
'val': 'bar'
}
result = "<p>bar</p>"
self.assertRender(template, context, result, helpers)
def test_nested_subexpression(self):
def para(this, options, values_dict):
return strlist(u'<p>') + options['fn'](values_dict) + strlist(u'</p>')
def fold(this, key, val):
return {key: val}
def add(this, num1, num2):
return num1 + num2
helpers = {
'para': para,
'fold': fold,
'add': add
}
template = u"{{#para (fold 'foo' (add val 1))}}{{foo}}{{/para}}"
context = {
'val': 1
}
result = "<p>2</p>"
self.assertRender(template, context, result, helpers)
def test_subexpression_containing_keyword(self):
def para(this, options, values_dict):
return strlist(u'<p>') + options['fn'](values_dict) + strlist(u'</p>')
def fold2(this, key, value=None):
return {key: value}
helpers = {
'para': para,
'fold2': fold2
}
template = u"{{#para (fold2 'foo' value=val)}}{{foo}}{{/para}}"
context = {
'val': 'bar'
}
result = "<p>bar</p>"
self.assertRender(template, context, result, helpers)
def test_subexpression_as_keyword(self):
def para2(this, options, blah=None, values_dict=None):
return strlist(u'<p>') + options['fn'](values_dict) + strlist(u'</p>')
def fold2(this, key, value=None):
return {key: value}
helpers = {
'para2': para2,
'fold2': fold2
}
template = u"{{#para2 values_dict=(fold2 'foo' value=val)}}{{foo}}{{/para2}}"
context = {
'val': 'bar'
}
result = "<p>bar</p>"
self.assertRender(template, context, result, helpers)
def test_nested_block_helpers(self):
def link(this, options):
return (
"<a href='" + this['name'] + "'>" +
str_class(options['fn'](this)) + "</a>")
def form(this, options, context):
return "<form>" + str_class(options['fn'](context)) + "</form>"
helpers = {
'link': link,
'form': form
}
template = u"{{#form yehuda}}<p>{{name}}</p>{{#link}}Hello{{/link}}{{/form}}"
context = {
'yehuda': {
'name': "Yehuda"
}
}
result = "<form><p>Yehuda</p><a href='Yehuda'>Hello</a></form>"
self.assertRender(template, context, result, helpers)
def test_block_inverted_sections(self):
template = u"{{#people}}{{name}}{{^}}{{none}}{{/people}}"
context = {'none': "No people"}
result = "No people"
self.assertRender(template, context, result)
def test_block_inverted_sections_with_empty_arrays(self):
template = u"{{#people}}{{name}}{{^}}{{none}}{{/people}}"
context = {
'none': "No people",
'people': []
}
result = "No people"
self.assertRender(template, context, result)
def test_block_helper_inverted_sections(self):
def list(this, options, context):
if len(context):
out = "<ul>"
for thing in context:
out += "<li>"
out += str_class(options['fn'](thing))
out += "</li>"
out += "</ul>"
return out
else:
return "<p>" + str_class(options['inverse'](this)) + "</p>"
helpers = {'list': list}
template = u"{{#list people}}{{name}}{{^}}<em>Nobody's here</em>{{/list}}"
context = {
'people': [
{'name': "Alan"},
{'name': "Yehuda"}
]
}
result = "<ul><li>Alan</li><li>Yehuda</li></ul>"
self.assertRender(template, context, result, helpers)
context = {
'people': []
}
result = "<p><em>Nobody's here</em></p>"
self.assertRender(template, context, result, helpers)
template = u"{{#list people}}{{name}}{{else}}<em>Nobody's here</em>{{/list}}"
context = {
'people': [
{'name': "Alan"},
{'name': "Yehuda"}
]
}
result = "<ul><li>Alan</li><li>Yehuda</li></ul>"
self.assertRender(template, context, result, helpers)
context = {
'people': []
}
result = "<p><em>Nobody's here</em></p>"
self.assertRender(template, context, result, helpers)
template = u"{{#list people}}Hello{{^}}{{message}}{{/list}}"
context = {
'people': [],
'message': "Nobody's here"
}
result = "<p>Nobody's here</p>"
self.assertRender(template, context, result, helpers)
def test_providing_a_helpers_hash(self):
helpers = {'world': "world"}
template = u"Goodbye {{cruel}} {{world}}!"
context = {
'cruel': "cruel"
}
result = "Goodbye cruel world!"
self.assertRender(template, context, result, helpers)
template = u"Goodbye {{#iter}}{{cruel}} {{world}}{{/iter}}!"
context = {
'iter': [
{'cruel': "cruel"}
]
}
result = "Goodbye cruel world!"
self.assertRender(template, context, result, helpers)
def test_in_cases_of_conflict_helpers_before_context(self):
helpers = {'lookup': 'helpers'}
template = u"{{lookup}}"
context = {
'lookup': 'Explicit'
}
result = "helpers"
self.assertRender(template, context, result, helpers)
helpers = {'lookup': 'helpers'}
template = u"{{{lookup}}}"
context = {
'lookup': 'Explicit'
}
result = "helpers"
self.assertRender(template, context, result, helpers)
helpers = {'lookup': [{}]}
template = u"{{#lookup}}Explicit{{/lookup}}"
context = {
'lookup': []
}
result = "Explicit"
self.assertRender(template, context, result, helpers)
def test_the_helpers_hash_is_available_is_nested_contexts(self):
helpers = {'helper': 'helper'}
template = u"{{#outer}}{{#inner}}{{helper}}{{/inner}}{{/outer}}"
context = {'outer': {'inner': {'unused': []}}}
result = "helper"
self.assertRender(template, context, result, helpers)
def test_basic_partials(self):
partials = {
'dude': u"{{name}} ({{url}}) "
}
template = u"Dudes: {{#dudes}}{{> dude}}{{/dudes}}"
context = {
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
result = "Dudes: Yehuda (http://yehuda) Alan (http://alan) "
self.assertRender(template, context, result, None, partials)
def test_partials_with_context(self):
partials = {
'dude': u"{{#this}}{{name}} ({{url}}) {{/this}}"
}
template = u"Dudes: {{>dude dudes}}"
context = {
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
result = "Dudes: Yehuda (http://yehuda) Alan (http://alan) "
self.assertRender(template, context, result, None, partials)
def test_partials_too_many_args(self):
partials = {
'dude': u"{{#this}}{{name}} ({{url}}) {{/this}}"
}
template = u'Dudes: {{>dude dudes "extra"}}'
context = {
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
self.assertRaises(PybarsError, render, template, context, partials=partials)
def test_partials_kwargs(self):
partials = {
'dude': u"{{name}} ({{url}}) "
}
template = u'Dudes: {{#dudes}}{{>dude url="http://example"}}{{/dudes}}'
context = {
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
result = "Dudes: Yehuda (http://example) Alan (http://example) "
self.assertRender(template, context, result, None, partials)
def test_partial_in_a_partial(self):
partials = {
'dude': u"{{name}} {{> url}} ",
'url': u"<a href='{{url}}'>{{url}}</a>"
}
template = u"Dudes: {{#dudes}}{{>dude}}{{/dudes}}"
context = {
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
result = (
"Dudes: Yehuda <a href='http://yehuda'>http://yehuda</a>"
" Alan <a href='http://alan'>http://alan</a> "
)
self.assertRender(template, context, result, None, partials)
def test_rendering_undefined_partial_throws_an_exception(self):
template = u"{{> whatever}}"
context = {}
self.assertRaises(PybarsError, render, template, context)
def test_root_nested_partial(self):
partials = {
'dude': u"{{name}} {{> url}} ",
'url': u"<a href='{{url}}' target='{{@root.target}}'>{{url}}</a>"
}
template = u"Dudes: {{#dudes}}{{>dude}}{{/dudes}}"
context = {
'target': '_blank',
'dudes': [
{
'name': "Yehuda",
'url': "http://yehuda"
},
{
'name': "Alan",
'url': "http://alan"
}
]
}
result = (
"Dudes: Yehuda <a href='http://yehuda' target='_blank'>http://yehuda</a>"
" Alan <a href='http://alan' target='_blank'>http://alan</a> "
)
self.assertRender(template, context, result, None, partials)
def test_GH_14_a_partial_preceding_a_selector(self):
partials = {
'dude': u"{{name}}"
}
template = u"Dudes: {{>dude}} {{another_dude}}"
context = {
'name': "Jeepers",
'another_dude': "Creepers"
}
result = "Dudes: Jeepers Creepers"
self.assertRender(template, context, result, None, partials)
def test_partials_with_literal_paths(self):
partials = {
'dude': u"{{name}}"
}
template = u"Dudes: {{> [dude]}}"
context = {
'name': "Jeepers",
'another_dude': "Creepers"
}
result = "Dudes: Jeepers"
self.assertRender(template, context, result, None, partials)
def test_partials_with_string(self):
partials = {
'+404/asdf?.bar': u"{{name}}"
}
template = u'Dudes: {{> "+404/asdf?.bar"}}'
context = {
'name': "Jeepers",
'another_dude': "Creepers"
}
result = "Dudes: Jeepers"
self.assertRender(template, context, result, None, partials)
def test_simple_literals_work(self):
def hello(this, param, times, bool1, bool2):
self.assertEqual(True, bool1)
self.assertEqual(False, bool2)
self.assertEqual(12, times)
return ("Hello " + param + " " + str_class(times) + " times: " +
str_class(bool1) + " " + str_class(bool2))
helpers = {'hello': hello}
template = u'Message: {{hello "world" 12 true false}}'
context = {}
result = "Message: Hello world 12 times: True False"
self.assertRender(template, context, result, helpers)
def test_true(self):
template = u"{{var}}"
context = {
'var': True
}
result = "true"
self.assertRender(template, context, result)
def test_true_unescaped(self):
template = u"{{{var}}}"
context = {
'var': True
}
result = "true"
self.assertRender(template, context, result)
def test_false(self):
template = u"{{var}}"
context = {
'var': False
}
result = "false"
self.assertRender(template, context, result)
def test_false_unescaped(self):
template = u"{{{var}}}"
context = {
'var': False
}
result = "false"
self.assertRender(template, context, result)
def test_none(self):
template = u"{{var}}"
context = {
'var': None
}
result = ""
self.assertRender(template, context, result)
def test_none_unescaped(self):
template = u"{{{var}}}"
context = {
'var': None
}
result = ""
self.assertRender(template, context, result)
def test_null(self):
def hello(this, param):
return "Hello " + ('' if param is None else param)
helpers = {'hello': hello}
template = u"Message: {{{hello null}}}"
context = {}
result = "Message: Hello "
self.assertRender(template, context, result, helpers)
def test_undefined(self):
def hello(this, param):
return "Hello " + ('' if param is None else param)
helpers = {'hello': hello}
template = u"Message: {{{hello undefined}}}"
context = {}
result = "Message: Hello "
self.assertRender(template, context, result, helpers)
def test_block_tag_whitespace(self):
template = u" {{#if var}}\n {{var}}\n {{/if}}"
context = {
'var': 'Hello'
}
result = " Hello"
self.assertRender(template, context, result)
template = u"{{#if var}} \n {{var}}\n {{/if}} "
self.assertRender(template, context, result)
template = u"{{#if var}}\n {{var}}\n{{/if}} "
self.assertRender(template, context, result)
template = u"{{#if var}}\n {{var}}\n{{/if}}"
self.assertRender(template, context, result)
template = u"{{#if var}} \r\n {{var}}\r\n {{/if}} "
self.assertRender(template, context, result)
template = u"\n{{#if var}}\n {{var}}\n{{/if}}"
result = "\n Hello"
self.assertRender(template, context, result)
def test_using_a_quote_in_the_middle_of_a_parameter_raises_an_error(self):
template = u'Message: {{hello wo"a"}}'
context = None
self.assertRaises(PybarsError, render, template, context)
def test_escaping_a_String_is_possible(self):
def hello(this, param):
return "Hello " + param
helpers = {'hello': hello}
template = u'Message: {{{hello "\\"world\\""}}}'
context = {}
result = "Message: Hello \"world\""
self.assertRender(template, context, result, helpers)
def test_it_works_with_single_quote_marks(self):
def hello(this, param):
return "Hello " + param
helpers = {'hello': hello}
template = u"Message: {{{hello 'Alan\\\'s world'}}}"
context = {}
result = "Message: Hello Alan's world"
self.assertRender(template, context, result, helpers)
def test_simple_multi_params_work(self):
def goodbye(this, cruel, world):
return "Goodbye " + cruel + " " + world
helpers = {'goodbye': goodbye}
template = u'Message: {{goodbye cruel world}}'
context = {
'cruel': "cruel",
'world': "world"
}
result = "Message: Goodbye cruel world"
self.assertRender(template, context, result, helpers)
def test_block_multi_params_work(self):
def goodbye(this, options, cruel, world):
return options['fn'](
{'greeting': "Goodbye", 'adj': cruel, 'noun': world})
helpers = {'goodbye': goodbye}
template = (
u'Message: {{#goodbye cruel world}}'
u'{{greeting}} {{adj}} {{noun}}{{/goodbye}}'
)
context = {
'cruel': "cruel",
'world': "world"
}
result = "Message: Goodbye cruel world"
self.assertRender(template, context, result, helpers)
def test_constructing_a_safestring_from_a_string_and_checking_its_type(self):
reference = "testing 1, 2, 3"
instance = strlist([reference])
self.assertIsInstance(instance, strlist)
self.assertEqual(str_class(reference), str_class(instance))
def test_if_a_context_is_not_found_helperMissing_is_used(self):
def link_to(this, helpername, context):
if helpername == 'link_to':
return strlist(("<a>", context, "</a>"))
helpers = {'helperMissing': link_to}
template = u"{{hello}} {{link_to world}}"
context = {
'hello': "Hello",
'world': "world"
}
result = "Hello <a>world</a>"
self.assertRender(template, context, result, helpers)
def test_Known_helper_should_render_helper(self):
helpers = {'hello': lambda this: "foo"}
template = u"{{hello}}"
context = {}
result = "foo"
self.assertRender(template, context, result, helpers)
def test_Unknown_helper_in_knownHelpers_only_mode_should_be_passed_as_undefined(self):
helpers = {
'typeof': lambda this, arg: str_class(type(arg)),
'hello': lambda this: "foo"
}
template = u"{{{typeof hello}}}"
context = {}
result = "<type 'NoneType'>" if sys.version_info < (3,) else "<class 'NoneType'>"
self.assertRender(template, context, result, helpers, knownHelpers=set(['typeof']), knownHelpersOnly=True)
def test_Builtin_helpers_available_in_knownHelpers_only_mode(self):
template = u"{{#unless foo}}bar{{/unless}}"
context = {}
result = "bar"
self.assertRender(template, context, result, knownHelpersOnly=True)
def test_Field_lookup_works_in_knownHelpers_only_mode(self):
template = u"{{foo}}"
context = {
'foo': 'bar'
}
result = "bar"
self.assertRender(template, context, result, knownHelpersOnly=True)
def test_Conditional_blocks_work_in_knownHelpers_only_mode(self):
template = u"{{#foo}}bar{{/foo}}"
context = {
'foo': 'baz'
}
result = "bar"
self.assertRender(template, context, result, knownHelpersOnly=True)
def test_Invert_blocks_work_in_knownHelpers_only_mode(self):
template = u"{{^foo}}bar{{/foo}}"
context = {
'foo': False
}
result = "bar"
self.assertRender(template, context, result, knownHelpersOnly=True)
def test_lambdas_are_resolved_by_blockHelperMissing_not_handlebars_proper(self):
# Probably should be called 'lambdas in the context are called as
# though for a simple block' - it wants to check moustache
# compatibility which allows all block stuff to be overridden via
# blockHelperMissing
template = u"{{#truthy}}yep{{/truthy}}"
context = {
'truthy': lambda this: True
}
result = "yep"
self.assertRender(template, context, result)
def test_default_helperMissing_no_params(self):
template = u"a{{missing}}b"
context = {}
result = "ab"
self.assertRender(template, context, result)
def test_default_helperMissing_with_param(self):
template = u"a{{missing something}}b"
context = {}
self.assertRaises(PybarsError, render, template, context)
def test_with(self):
template = u"{{#with person}}{{first}} {{last}}{{/with}}"
context = {
'person': {
'first': "Alan",
'last': "Johnson"
}
}
result = "Alan Johnson"
self.assertRender(template, context, result)
def test_if(self):
template = u"{{#if goodbye}}GOODBYE {{/if}}cruel {{world}}!"
context = {
'goodbye': True,
'world': "world"
}
result = u"GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': 'dummy',
'world': "world"
}
result = u"GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': False,
'world': "world"
}
result = u"cruel world!"
self.assertRender(template, context, result)
context = {
'world': "world"
}
result = u"cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': ['foo'],
'world': "world"
}
result = u"GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': [],
'world': "world"
}
result = u"cruel world!"
self.assertRender(template, context, result)
def test_if_else(self):
template = u"{{#if goodbye}}GOODBYE{{else}}Hello{{/if}} cruel {{world}}!"
context = {
'goodbye': False,
'world': "world"
}
result = u"Hello cruel world!"
self.assertRender(template, context, result)
def test_if_with_function_argument(self):
template = u"{{#if goodbye}}GOODBYE {{/if}}cruel {{world}}!"
context = {
'goodbye': lambda this: True,
'world': "world"
}
result = u"GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': lambda this: this['world'],
'world': "world"
}
result = u"GOODBYE cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': lambda this: False,
'world': "world"
}
result = u"cruel world!"
self.assertRender(template, context, result)
context = {
'goodbye': lambda this: None,
'world': "world"
}
result = u"cruel world!"
self.assertRender(template, context, result)
def test_resolve_with_attrs(self):
class TestAttr():
@property
def text(self):
return 'Hello'
class TestGet():
def get(self, name):
return {'text': 'Hi'}.get(name)
template = u"{{#each .}}{{test.text}}! {{/each}}"
context = [
{'test': TestAttr()},
{'test': TestGet()},
{'test': {'text': 'Goodbye'}}
]
result = "Hello! Hi! Goodbye! "
self.assertRender(template, context, result)
def test_list_context(self):
template = u"{{#each .}}{{#each .}}{{text}}! {{/each}}cruel world!{{/each}}"
context = [
[
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
]
]
result = "goodbye! Goodbye! GOODBYE! cruel world!"
self.assertRender(template, context, result)
def test_context_with_attrs(self):
class TestContext():
@property
def text(self):
return 'Goodbye'
template = u"{{#each .}}{{text}}! {{/each}}cruel world!"
context = [
TestContext()
]
result = "Goodbye! cruel world!"
self.assertRender(template, context, result)
def test_each(self):
template = u"{{#each goodbyes}}{{text}}! {{/each}}cruel {{world}}!"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "goodbye! Goodbye! GOODBYE! cruel world!"
self.assertRender(template, context, result)
context = {'goodbyes': [], 'world': "world"}
result = "cruel world!"
self.assertRender(template, context, result)
def test_each_this(self):
helpers = {
'capitalize': lambda this, value: value.upper()
}
template = u"{{#each name}}{{capitalize this}} {{/each}}"
context = {
'name': [
'John',
'James'
]
}
result = "JOHN JAMES "
self.assertRender(template, context, result, helpers)
def test_each_of_None(self):
template = u"Goodbye {{^each things}}cruel{{/each}} world!"
context = {
'things': None
}
result = u"Goodbye cruel world!"
self.assertRender(template, context, result)
def test_each_of_empty_list(self):
template = u"Goodbye {{#each things}}happy {{^}}cruel {{/each}}world!"
context = {
'things': []
}
result = u"Goodbye cruel world!"
self.assertRender(template, context, result)
def test_each_of_truthy_non_iterable_object(self):
template = u"Goodbye {{#each things}}happy {{^}}cruel {{/each}}world!"
context = {
'things': True
}
result = u"Goodbye cruel world!"
self.assertRender(template, context, result)
def test_each_with_object_and_key(self):
template = u"{{#each goodbyes}}{{@key}}. {{text}}! {{/each}}cruel {{world}}!"
context = {
'goodbyes': {
"<b>#1</b>": {
'text': "goodbye"
},
2: {
'text': "GOODBYE"
}
},
'world': "world"
}
self.assertIn(
render(template, context),
# Depending on iteration order, one will come before the other.
(
"<b>#1</b>. goodbye! 2. GOODBYE! cruel world!",
"2. GOODBYE! <b>#1</b>. goodbye! cruel world!"
)
)
def test_each_with_index(self):
template = u"{{#each goodbyes}}{{@index}}. {{text}}! {{/each}}cruel {{world}}!"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "0. goodbye! 1. Goodbye! 2. GOODBYE! cruel world!"
self.assertRender(template, context, result)
def test_each_with_nested_index(self):
template = u"{{#each goodbyes}}{{@index}}. {{text}}! {{#each ../goodbyes}}{{@index}} {{/each}}After {{@index}} {{/each}}{{@index}}cruel {{world}}!"
context = {
'goodbyes': [
{'text': "goodbye"},
{'text': "Goodbye"},
{'text': "GOODBYE"}
],
'world': "world"
}
result = "0. goodbye! 0 1 2 After 0 1. Goodbye! 0 1 2 After 1 2. GOODBYE! 0 1 2 After 2 cruel world!"
self.assertRender(template, context, result)
def test_each_with_parent_index(self):
template = u"{{#each people}}{{#each foods}}{{../name}}({{@../index}}) likes {{name}}({{@index}}), {{/each}}{{/each}}"
context = {
'people': [
{
'name': 'John',
'foods': [
{'name': 'apples'},
{'name': 'pears'}
]
},
{
'name': 'Jane',
'foods': [
{'name': 'grapes'},
{'name': 'pineapple'}
]
}
]
}
result = "John(0) likes apples(0), John(0) likes pears(1), Jane(1) likes grapes(0), Jane(1) likes pineapple(1), "
self.assertRender(template, context, result)
def test_log(self):
template = u"{{log blah}}"
context = {
'blah': "whee"
}
result = ''
log = []
original_log = pybars.log
pybars.log = log.append
self.assertRender(template, context, result)
self.assertEqual(["whee"], log)
pybars.log = original_log
def test_overriding_property_lookup(self):
pass
# Empty upstream
# ... in data ... skipped
def test_helpers_take_precedence_over_same_named_context_properties(self):
helpers = {
'goodbye': lambda this: this['goodbye'].upper()
}
template = u"{{goodbye}} {{cruel world}}"
context = {
'goodbye': "goodbye",
'cruel': lambda this, world: "cruel " + world.upper(),
'world': "world"
}
result = "GOODBYE cruel WORLD"
self.assertRender(template, context, result, helpers)
def test_block_helpers_take_precedence_over_same_named_context_properties(self):
def goodbye(this, options):
return strlist([this['goodbye'].upper()]) + options['fn'](this)
helpers = {'goodbye': goodbye}
template = u"{{#goodbye}} {{cruel world}}{{/goodbye}}"
context = {
'goodbye': "goodbye",
'cruel': lambda this, world: "cruel " + world.upper(),
'world': "world"
}
result = "GOODBYE cruel WORLD"
self.assertRender(template, context, result, helpers)
def test_Scoped_names_take_precedence_over_helpers(self):
helpers = {
'goodbye': lambda this: this['goodbye'].upper()
}
template = u"{{this.goodbye}} {{cruel world}} {{cruel this.goodbye}}"
context = {
'goodbye': "goodbye",
'cruel': lambda this, world: "cruel " + world.upper(),
'world': "world"
}
result = u"goodbye cruel WORLD cruel GOODBYE"
self.assertRender(template, context, result, helpers)
def test_Scoped_names_take_precedence_over_block_helpers(self):
def goodbye(this, options):
return strlist([this['goodbye'].upper()]) + options['fn'](this)
helpers = {'goodbye': goodbye}
template = u"{{#goodbye}} {{cruel world}}{{/goodbye}} {{this.goodbye}}"
context = {
'goodbye': "goodbye",
'cruel': lambda this, world: "cruel " + world.upper(),
'world': "world"
}
result = "GOODBYE cruel WORLD goodbye"
self.assertRender(template, context, result, helpers)
def test_helpers_can_take_an_optional_hash(self):
# Note: the order is a rotation on the template order to avoid *args
# processing generating a false pass
def goodbye(this, times, cruel, world):
return "GOODBYE " + cruel + " " + world + " " + str(times) + " TIMES"
helpers = {'goodbye': goodbye}
template = u'{{goodbye cruel="CRUEL" world="WORLD" times=12}}'
context = {}
result = u"GOODBYE CRUEL WORLD 12 TIMES"
self.assertRender(template, context, result, helpers)
def test_helpers_can_take_an_optional_hash_with_booleans(self):
def goodbye(this, cruel, world, _print):
if _print is True:
return "GOODBYE " + cruel + " " + world
elif _print is False:
return "NOT PRINTING"
else:
return "THIS SHOULD NOT HAPPEN"
helpers = {'goodbye': goodbye}
template = u'{{goodbye cruel="CRUEL" world="WORLD" _print=true}}'
context = {}
result = "GOODBYE CRUEL WORLD"
self.assertRender(template, context, result, helpers)
template = u'{{goodbye cruel="CRUEL" world="WORLD" _print=false}}'
context = {}
result = "NOT PRINTING"
self.assertRender(template, context, result, helpers)
def test_block_helpers_can_take_an_optional_hash(self):
def goodbye(this, options, times, cruel):
return "GOODBYE " + cruel + " " + str_class(options['fn'](this)) + " " + str(times) + " TIMES"
helpers = {'goodbye': goodbye}
template = u'{{#goodbye cruel="CRUEL" times=12}}world{{/goodbye}}'
context = {}
result = "GOODBYE CRUEL world 12 TIMES"
self.assertRender(template, context, result, helpers)
def test_block_helpers_can_take_an_optional_hash_with_booleans(self):
def goodbye(this, options, cruel, _print):
if _print is True:
return "GOODBYE " + cruel + " " + str_class(options['fn'](this))
elif _print is False:
return "NOT PRINTING"
else:
return "THIS SHOULD NOT HAPPEN"
helpers = {'goodbye': goodbye}
template = u'{{#goodbye cruel="CRUEL" _print=true}}world{{/goodbye}}'
context = {}
result = "GOODBYE CRUEL world"
self.assertRender(template, context, result, helpers)
template = u'{{#goodbye cruel="CRUEL" _print=false}}world{{/goodbye}}'
context = {}
result = "NOT PRINTING"
self.assertRender(template, context, result, helpers)
def test_should_lookup_arbitrary_content(self):
template = u'{{#each goodbyes}}{{lookup ../data .}}{{/each}}'
context = {
'goodbyes': [
0,
1
],
'data': [
'foo',
'bar'
]
}
result = 'foobar'
self.assertRender(template, context, result)
def test_should_not_fail_on_undefined_value(self):
template = u'{{#each goodbyes}}{{lookup ../bar .}}{{/each}}'
context = {
'goodbyes': [
0,
1
],
'data': [
'foo',
'bar'
]
}
result = ''
self.assertRender(template, context, result)
def test_should_not_fail_on_unavailable_value(self):
template = u'{{lookup thelist 3}}.{{lookup theobject "qux"}}.{{lookup thenumber 0}}'
context = {
'thelist': [
'foo',
'bar'
],
'theobject': {
'foo': 'bar'
},
'thenumber': 7
}
result = '..'
self.assertRender(template, context, result)
def test_should_lookup_content_by_special_variables(self):
template = u'{{#each goodbyes}}{{lookup ../data @index}}{{/each}}'
context = {
'goodbyes': [
0,
1
],
'data': [
'foo',
'bar'
]
}
result = 'foobar'
self.assertRender(template, context, result)
def test_cannot_read_property_of_undefined(self):
template = u"{{#books}}{{title}}{{author.name}}{{/books}}"
context = {
"books": [
{
"title": "The origin of species",
"author": {
"name": "Charles Darwin"
}
},
{
"title": "Lazarillo de Tormes"
}
]
}
result = "The origin of speciesCharles DarwinLazarillo de Tormes"
self.assertRender(template, context, result)
def test_inverted_sections_print_when_they_shouldnt(self):
template = u"{{^set}}not set{{/set}} :: {{#set}}set{{/set}}"
context = {}
result = "not set :: "
self.assertRender(template, context, result)
context = {
'set': None
}
result = "not set :: "
self.assertRender(template, context, result)
context = {
'set': False
}
result = "not set :: "
self.assertRender(template, context, result)
context = {
'set': True
}
result = " :: set"
self.assertRender(template, context, result)
def test_Mustache_man_page(self):
template = (
u"Hello {{name}}. You have just won ${{value}}!"
u"{{#in_ca}} Well, ${{taxed_value}}, after taxes.{{/in_ca}}"
)
context = {
"name": "Chris",
"value": 10000,
# Note that the int here is not needed in JS because JS doesn't
# have ints and floats.
"taxed_value": int(10000 - (10000 * 0.4)),
"in_ca": True
}
result = "Hello Chris. You have just won $10000! Well, $6000, after taxes."
self.assertRender(template, context, result)
def test_GH_158__Using_array_index_twice_breaks_the_template(self):
template = u"{{arr.[0]}}, {{arr.[1]}}"
context = {
"arr": [
1,
2
]
}
result = "1, 2"
self.assertRender(template, context, result)
def test_bug_reported_by__fat_where_lambdas_weren_t_being_properly_resolved(self):
template = u"<strong>This is a slightly more complicated {{thing}}.</strong>.\n{{! Just ignore this business. }}\nCheck this out:\n{{#hasThings}}\n<ul>\n{{#things}}\n<li class={{className}}>{{word}}</li>\n{{/things}}</ul>.\n{{/hasThings}}\n{{^hasThings}}\n\n<small>Nothing to check out...</small>\n{{/hasThings}}"
context = {
'thing': lambda this: "blah",
'things': [
{
'className': "one",
'word': "@fat"
},
{
'className': "two",
'word': "@dhg"
},
{
'className': "three",
'word': "@sayrer"
}
],
'hasThings': lambda this: True
}
result = "<strong>This is a slightly more complicated blah.</strong>.\n\nCheck this out:\n\n<ul>\n\n<li class=one>@fat</li>\n\n<li class=two>@dhg</li>\n\n<li class=three>@sayrer</li>\n</ul>.\n\n"
self.assertRender(template, context, result)
def test_invalid_python_identifiers_cannot_be_used_as_keyword_arguments(self):
template = u'{{foo 0x="bar"}}'
context = {}
result = None
error = 'Error at character 7 of line 1 near 0x="bar"}}'
self.assertRender(template, context, result, error=error)
def test_invalid_closing_tags(self):
template = u'{{#foo}}{{/#foo}}'
context = {}
result = None
error = 'Error at character 11 of line 1 near {{/#foo}}'
self.assertRender(template, context, result, error=error)
def test_missing_bracket(self):
template = u'{{foo}'
context = {}
result = None
error = 'Error at character 5 of line 1 near {{foo}'
self.assertRender(template, context, result, error=error)
def test_backslash_does_not_normally_escape_text(self):
helpers = {
'echo': lambda this, arg: arg
}
template = u'{{echo "\\x"}}'
context = {}
result = '\\x'
self.assertRender(template, context, result, helpers)
def test_backslash_only_escapes_quote(self):
helpers = {
'echo': lambda this, arg: arg
}
# If the parser does not know to escape the backslash but does know to
# escape the quote, it will end up with something like the following
# in our generated rendering code:
#
# value = value(child_scope, "\\"")
#
# Which will raise a SyntaxError.
template = u'{{echo "\\\\""}}'
context = {}
result = '\\"'
self.assertRender(template, context, result, helpers)
    def test_newlines_in_string_literals(self):
helpers = {
'echo': lambda this, arg: arg
}
template = u'{{echo "Hello,\nWorld!"}}'
context = {}
result = 'Hello,\nWorld!'
self.assertRender(template, context, result, helpers)
def test_code_injection(self):
helpers = {
'echo': lambda this, arg: arg
}
        # If escape sequences are not dealt with properly, we are able to run
        # arbitrary Python code.
template = u'{{echo "\\\\")\n\n raise AssertionError(\'Code Injected!\')\n#"}}'
context = {}
        result = '\\")\n\n raise AssertionError(\'Code Injected!\')\n#'
self.assertRender(template, context, result, helpers)
def test_precompile(self):
try:
template = u"Goodbye\n{{cruel}}\n{{world}}!"
context = {
'cruel': "cruel",
'world': "world"
}
result = "Goodbye\ncruel\nworld!"
compiler = Compiler()
code = compiler.precompile(template)
with open('test_precompile.py', 'w') as f:
f.write(code)
import test_precompile
self.assertEqual(result, str(test_precompile.render(context)))
finally:
if os.path.exists('test_precompile.py'):
os.unlink('test_precompile.py')
|
isaacdd/pybars3
|
tests/test_acceptance.py
|
Python
|
lgpl-3.0
| 62,261
|
import numpy as np
from sklearn.datasets import load_iris
import metric_learn
CLASSES = {
'Covariance': metric_learn.Covariance(),
'ITML_Supervised': metric_learn.ITML_Supervised(num_constraints=200),
'LFDA': metric_learn.LFDA(k=2, dim=2),
'LMNN': metric_learn.LMNN(k=5, learn_rate=1e-6, verbose=False),
'LSML_Supervised': metric_learn.LSML_Supervised(num_constraints=200),
'MLKR': metric_learn.MLKR(),
'NCA': metric_learn.NCA(max_iter=700, learning_rate=0.01, num_dims=2),
'RCA_Supervised': metric_learn.RCA_Supervised(dim=2, num_chunks=30,
chunk_size=2),
'SDML_Supervised': metric_learn.SDML_Supervised(num_constraints=1500),
}
try:
from metric_learn.lmnn import python_LMNN
if python_LMNN is not metric_learn.LMNN:
CLASSES['python_LMNN'] = python_LMNN(k=5, learn_rate=1e-6, verbose=False)
except ImportError:
pass
class IrisDataset(object):
params = [sorted(CLASSES)]
param_names = ['alg']
def setup(self, alg):
iris_data = load_iris()
self.iris_points = iris_data['data']
self.iris_labels = iris_data['target']
def time_fit(self, alg):
np.random.seed(5555)
CLASSES[alg].fit(self.iris_points, self.iris_labels)
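# Note: this module follows airspeed velocity (asv) conventions: methods
# prefixed with time_ are timed, setup() runs before each measurement,
# and params/param_names parametrize the benchmark over every algorithm
# name in CLASSES. It is collected by running `asv run` from a checkout
# with an asv.conf.json configured (an assumption about the repo layout).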
|
terrytangyuan/metric-learn
|
bench/benchmarks/iris.py
|
Python
|
mit
| 1,245
|
# coding=utf-8
from __future__ import unicode_literals
c_lang_config = {
"name": "c",
"compile": {
"group_memory": True,
"src_name": "main.c",
"exe_name": "main",
"max_cpu_time": 5.0,
"max_real_time": 10.0,
"max_memory": 512 * 1024, # 512M compile memory
"compile_command": "/usr/bin/gcc -DONLINE_JUDGE -O2 -w -fmax-errors=3 -std=c99 {src_path} -lm -o {exe_path}",
    },
    "run": {
        "exe_name": "main",
        "max_cpu_time": 1.0,
        "max_real_time": 5.0,
        "max_memory": 10 * 1024,  # 10M run memory
        "command": "{exe_path}",
    }
}
cpp_lang_config = {
"name": "c++",
"compile": {
"group_memory": True,
"src_name": "main.cpp",
"exe_name": "main",
"max_cpu_time": 5.0,
"max_real_time": 10.0,
"max_memory": 512 * 1024, # 512M compile memory
"compile_command": "/usr/bin/g++ -DONLINE_JUDGE -O2 -w -fmax-errors=3 -std=c++11 {src_path} -lm -o {exe_path}",
    },
    "run": {
        "exe_name": "main",
        "max_cpu_time": 1.0,
        "max_real_time": 5.0,
        "max_memory": 10 * 1024,  # 10M run memory
        "command": "{exe_path}",
    }
}
java_lang_config = {
"name": "java",
"compile": {
"group_memory": True,
"src_name": "Main.java",
"exe_name": "Main",
"max_cpu_time": 3.0,
"max_real_time": 5.0,
"max_memory": -1,
"compile_command": "/usr/bin/javac {src_path} -d {exe_name} -encoding UTF8"
    },
    "run": {
        "group_memory": True,
        "exe_name": "Main",
        "max_cpu_time": 1.0,
        "max_real_time": 5.0,
        "max_memory": 10 * 1024,  # 10M run memory
        "command": "/usr/bin/java -cp {exe_name} Main",
    }
}
py2_lang_config = {
    "name": "python2",
    "compile": {
        "src_name": "solution.py",
        "exe_name": "solution.pyc",
        "max_cpu_time": 3000,
        "max_real_time": 5000,
        "max_memory": 128 * 1024,  # 128M compile memory
        "compile_command": "/usr/bin/python -m py_compile {src_path}",
    },
"run": {
"exe_name": "solution.pyc",
"command": "/usr/bin/python {exe_path}",
}
}
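# Illustrative sketch (not used by the judge itself): the compile_command
# and command strings are plain str.format templates, so a runner is
# expected to substitute {src_path} and {exe_path} before executing them.
# The paths below are hypothetical.
if __name__ == "__main__":
    demo_cmd = c_lang_config["compile"]["compile_command"].format(
        src_path="/tmp/judge/main.c",  # hypothetical source location
        exe_path="/tmp/judge/main",    # hypothetical output location
    )
    print(demo_cmd)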
|
joeyac/JudgeServer
|
client/languages.py
|
Python
|
mit
| 2,209
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('dbmail', '0004_auto_20150321_2214'),
]
operations = [
migrations.CreateModel(
name='MailBaseTemplate',
fields=[
('id', models.AutoField(
verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('name', models.CharField(
unique=True, max_length=100, verbose_name='Name')),
('message', models.TextField(
help_text='Basic template for mail messages. '
'{{content}} tag for msg.',
verbose_name='Body')),
('created', models.DateTimeField(
auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(
auto_now=True, verbose_name='Updated')),
],
options={
'verbose_name': 'Mail base template',
'verbose_name_plural': 'Mail base templates',
},
),
migrations.AddField(
model_name='mailtemplate',
name='base',
field=models.ForeignKey(
verbose_name='Basic template', blank=True,
to='dbmail.MailBaseTemplate', null=True, on_delete=models.CASCADE),
),
]
|
LPgenerator/django-db-mailer
|
dbmail/migrations/0005_auto_20150506_2201.py
|
Python
|
gpl-2.0
| 1,511
|
'''
rrsig_ttls_match - Record test: RrsigTtlsMatch
.. Copyright (c) 2015 Neustar, Inc. All rights reserved.
.. See COPYRIGHT.txt for full notice. See LICENSE.txt for terms and conditions.
'''
import dns_sprockets_lib.validators as validators
class RrsigTtlsMatch(validators.RecTest):
# pylint: disable=too-few-public-methods
'''
    Checks that RRSIG TTLs match the original and covered TTLs.
'''
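    # Example (hypothetical records): an RRSIG with ttl=300 but
    # original_ttl=3600, covering an A rdataset whose TTL is 3600, fails
    # both checks in run() and reports the two mismatches joined by ', and '.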
TEST_DNSSECTYPE = True
TEST_RRTYPE = 'RRSIG'
def run(self, context, suggested_tested, name, ttl, rdata):
# pylint: disable=too-many-arguments
result = None
if ttl != rdata.original_ttl:
result = 'TTL doesn\'t match original TTL'
rdataset = context.zone_obj.get_rdataset(name, rdata.type_covered)
if rdataset and ttl != rdataset.ttl:
if result:
result += ', and '
else:
result = ''
result += 'TTL doesn\'t match covered TTL'
return (suggested_tested, result)
# end of file
|
ultradns/dns_sprockets
|
dns_sprockets_lib/validators/rrsig_ttls_match.py
|
Python
|
apache-2.0
| 1,026
|
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
from ..ui_mixins.quick_panel import show_remote_panel
from ..ui_mixins.input_panel import show_single_line_input_panel
class GsRemoteAddCommand(WindowCommand, GitCommand):
"""
Add remotes
"""
def run(self, url=None):
# Get remote name from user
if url:
self.on_enter_remote(url)
else:
show_single_line_input_panel("Remote URL", "", self.on_enter_remote, None, None)
def on_enter_remote(self, input_url):
self.url = input_url
owner = self.username_from_url(input_url)
show_single_line_input_panel("Remote name", owner, self.on_enter_name, None, None)
def on_enter_name(self, remote_name):
self.git("remote", "add", remote_name, self.url)
if sublime.ok_cancel_dialog("Your remote was added successfully. Would you like to fetch from this remote?"):
self.window.run_command("gs_fetch", {"remote": remote_name})
class GsRemoteRemoveCommand(WindowCommand, GitCommand):
"""
Remove remotes
"""
def run(self):
show_remote_panel(self.on_remote_selection, show_url=True)
def on_remote_selection(self, remote):
if not remote:
return
@util.actions.destructive(description="remove a remote")
def remove():
self.git("remote", "remove", remote)
remove()
class GsRemoteRenameCommand(WindowCommand, GitCommand):
"""
    Rename remotes
"""
def run(self):
show_remote_panel(self.on_remote_selection, show_url=True)
def on_remote_selection(self, remote):
if not remote:
return
self.remote = remote
show_single_line_input_panel("Remote name", remote, self.on_enter_name, None, None)
def on_enter_name(self, new_name):
self.git("remote", "rename", self.remote, new_name)
self.window.status_message("Remote {} was renamed to {}.".format(self.remote, new_name))
| divmain/GitSavvy | core/commands/remote.py | Python | mit | 2,065 |
from __future__ import print_function
import os
import logging
from pdb import pm
from miasm.loader import pe
from miasm.analysis.sandbox import Sandbox_Win_x86_32
from miasm.os_dep.common import get_win_str_a
# User defined methods
def kernel32_GetProcAddress(jitter):
"""Hook on GetProcAddress to note where UPX stores import pointers"""
ret_ad, args = jitter.func_args_stdcall(["libbase", "fname"])
# When the function is called, EBX is a pointer to the destination buffer
dst_ad = jitter.cpu.EBX
logging.error('EBX ' + hex(dst_ad))
# Handle ordinal imports
fname = (args.fname if args.fname < 0x10000
else get_win_str_a(jitter, args.fname))
logging.error(fname)
# Get the generated address of the library, and store it in memory to
# dst_ad
ad = sb.libs.lib_get_add_func(args.libbase, fname, dst_ad)
# Add a breakpoint in case of a call on the resolved function
# NOTE: never happens in UPX, just for skeleton
jitter.handle_function(ad)
jitter.func_ret_stdcall(ret_ad, ad)
parser = Sandbox_Win_x86_32.parser(description="Generic UPX unpacker")
parser.add_argument("filename", help="PE Filename")
parser.add_argument('-v', "--verbose",
help="verbose mode", action="store_true")
parser.add_argument("--graph",
help="Export the CFG graph in graph.dot",
action="store_true")
options = parser.parse_args()
options.load_hdr = True
sb = Sandbox_Win_x86_32(options.filename, options, globals(),
parse_reloc=False)
if options.verbose is True:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
if options.verbose is True:
print(sb.jitter.vm)
# Ensure there is one and only one leave (for OEP discovering)
mdis = sb.machine.dis_engine(sb.jitter.bs)
mdis.dont_dis_nulstart_bloc = True
asmcfg = mdis.dis_multiblock(sb.entry_point)
leaves = list(asmcfg.get_bad_blocks())
assert len(leaves) == 1
leave = leaves.pop()
logging.info(leave)
end_offset = mdis.loc_db.get_location_offset(leave.loc_key)
logging.info('final offset')
logging.info(hex(end_offset))
# Export CFG graph (dot format)
if options.graph is True:
open("graph.dot", "w").write(asmcfg.dot())
if options.verbose is True:
print(sb.jitter.vm)
def update_binary(jitter):
sb.pe.Opthdr.AddressOfEntryPoint = sb.pe.virt2rva(jitter.pc)
logging.info('updating binary')
for s in sb.pe.SHList:
sdata = sb.jitter.vm.get_mem(sb.pe.rva2virt(s.addr), s.rawsize)
sb.pe.rva.set(s.addr, sdata)
# Stop execution
jitter.run = False
return False
# Set callbacks
sb.jitter.add_breakpoint(end_offset, update_binary)
# Run
sb.run()
# Rebuild PE
# Alternative solution: miasm.jitter.loader.pe.vm2pe(sb.jitter, out_fname,
# libs=sb.libs, e_orig=sb.pe)
new_dll = []
sb.pe.SHList.align_sections(0x1000, 0x1000)
logging.info(repr(sb.pe.SHList))
sb.pe.DirRes = pe.DirRes(sb.pe)
sb.pe.DirImport.impdesc = None
logging.info(repr(sb.pe.DirImport.impdesc))
new_dll = sb.libs.gen_new_lib(sb.pe)
logging.info(new_dll)
sb.pe.DirImport.impdesc = []
sb.pe.DirImport.add_dlldesc(new_dll)
s_myimp = sb.pe.SHList.add_section(name="myimp", rawsize=len(sb.pe.DirImport))
logging.info(repr(sb.pe.SHList))
sb.pe.DirImport.set_rva(s_myimp.addr)
# XXXX TODO
sb.pe.NThdr.optentries[pe.DIRECTORY_ENTRY_DELAY_IMPORT].rva = 0
bname, fname = os.path.split(options.filename)
fname = os.path.join(bname, fname.replace('.', '_'))
open(fname + '_unupx.bin', 'wb').write(bytes(sb.pe))
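# Example invocation (sketch; the exact options come from the
# Sandbox_Win_x86_32 argument parser above):
#
#   python unpack_upx.py upx_packed.exe --verbose
#
# The rebuilt PE is written next to the input with an _unupx.bin suffix.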
| commial/miasm | example/jitter/unpack_upx.py | Python | gpl-2.0 | 3,555 |
import unittest
from test import test_support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import collections
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1  # unreachable; the yield makes this a generator, so PassThru is raised on first next()
class BadCmp:
def __hash__(self):
return 1
def __cmp__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
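# For example, h = HashCountingInt(5); hash(h); hash(h) leaves h.hash_count == 2.
# The rehash tests below rely on this counter to prove that set construction
# reuses hash values cached in dict keys instead of recomputing them.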
class TestJointOps(unittest.TestCase):
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.thetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.thetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_compare(self):
self.assertRaises(TypeError, self.s.__cmp__, self.s)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in xrange(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
name = repr(s).partition('(')[0] # strip class name from repr string
self.assertEqual(repr(s), '%s([%s(...)])' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, s,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, xrange(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
test_support.check_free_after_iterating(self, iter, self.thetype)
class TestSet(TestJointOps):
thetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2)
self.assertRaises(TypeError, s.__init__, 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal_insertion_order(self):
# SF Issue #26020 -- Expect left to right insertion
s = {1, 1.0, True}
self.assertEqual(len(s), 1)
stored_value = s.pop()
self.assertEqual(type(stored_value), int)
def test_set_literal_evaluation_order(self):
# Expect left to right expression evaluation
events = []
def record(obj):
events.append(obj)
s = {record(1), record(2), record(3)}
self.assertEqual(events, [1, 2, 3])
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError, e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in xrange(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, unicode, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps):
thetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(xrange(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in xrange(n)]
results = set()
for i in xrange(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = range(10) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in xrange(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(xrange(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps(unittest.TestCase):
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
fo = open(test_support.TESTFN, "wb")
try:
print >> fo, self.set,
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
test_support.unlink(test_support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
# note: __length_hint__ is an internal undocumented API,
# don't rely on it in your own programs
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.set, proto)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set([])"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([3])"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "set([(0, 'zero')])"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsUnicode(TestBasicOps):
def setUp(self):
self.case = "unicode set"
self.values = [u"a", u"b", u"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringUnicode(TestBasicOps):
def setUp(self):
self.case = "string and bytes set"
self.values = ["a", "b", u"a", u"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def test_repr(self):
with test_support.check_warnings():
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True  # unreachable; makes baditer() a generator that raises on first next()
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(xrange(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_cmp(self):
a, b = set('a'), set('b')
self.assertRaises(TypeError, cmp, a, b)
# You can view this as a buglet: cmp(a, a) does not raise TypeError,
# because __eq__ is tried before __cmp__, and a.__eq__(a) returns True,
# which Python thinks is good enough to synthesize a cmp() result
# without calling __cmp__.
self.assertEqual(cmp(a, a), 0)
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets(unittest.TestCase):
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
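# For example, TestSubsetPartial below pits set([1]) against set([1, 2]):
# "!=", "<" and "<=" must all hold (and their reversed forms with the
# operands swapped), while issubset()/issuperset() must agree with "<="/">=".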
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps(unittest.TestCase):
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps):
def setUp(self):
def gen():
for i in xrange(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying(unittest.TestCase):
def test_copy(self):
dup = list(self.set.copy())
self.assertEqual(len(dup), len(self.set))
for el in self.set:
self.assertIn(el, dup)
pos = dup.index(el)
self.assertIs(el, dup.pop(pos))
self.assertFalse(dup)
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
self.assertSetEqual(dup, self.set)
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0  # deliberately raises ZeroDivisionError when the iterator is advanced
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
from itertools import chain, imap
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertSetEqual(cons(g(s)), set(g(s)))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertSetEqual(actual, expected)
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertSetEqual(s, t)
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
def test_iter_and_mutate(self):
# Issue #24581
s = set(range(100))
s.clear()
s.update(range(100))
si = iter(s)
s.clear()
a = list(range(100))
s.update(range(100))
list(si)
# Application tests (based on David Eppstein's graph recipes) ===================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([U.next()])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
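# For example, list(powerset('ab')) yields the four subsets of {'a', 'b'} as
# frozensets, in the order {}, {'a'}, {'b'}, {'a', 'b'}.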
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
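# For example, cube(2) is the 4-cycle on the subsets of {0, 1}: the vertex
# frozenset() maps to its two neighbours frozenset([0]) and frozenset([1]).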
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
def faces(G):
'Return a set of faces in G, where a face is a frozenset of the vertices on that face'
# currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices each connecting a triangle and square
g = cube(3)
cuboctahedron = linegraph(g)  # vert --> {v1, v2, v3, v4}
self.assertEqual(len(cuboctahedron), 12)  # twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
def test_main(verbose=None):
test_classes = (
TestSet,
TestSetSubclass,
TestSetSubclassWithKeywordArgs,
TestFrozenSet,
TestFrozenSetSubclass,
TestSetOfSets,
TestExceptionPropagation,
TestBasicOpsEmpty,
TestBasicOpsSingleton,
TestBasicOpsTuple,
TestBasicOpsTriple,
TestBinaryOps,
TestUpdateOps,
TestMutate,
TestSubsetEqualEmpty,
TestSubsetEqualNonEmpty,
TestSubsetEmptyNonEmpty,
TestSubsetPartial,
TestSubsetNonOverlap,
TestOnlySetsNumeric,
TestOnlySetsDict,
TestOnlySetsTuple,
TestOnlySetsString,
TestOnlySetsGenerator,
TestCopyingEmpty,
TestCopyingSingleton,
TestCopyingTriple,
TestCopyingTuple,
TestCopyingNested,
TestIdentities,
TestVariousIteratorArgs,
TestGraphs,
TestWeirdBugs,
)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
if __name__ == "__main__":
test_main(verbose=True)
| HiSPARC/station-software | user/python/Lib/test/test_set.py | Python | gpl-3.0 | 62,794 |
#common sets up the conduit environment
from common import *
import conduit.dataproviders.File as FileDataProvider
import conduit.datatypes.File as File
import conduit.utils as Utils
import conduit.vfs as Vfs
GROUP_NAME = "Cheese"
NESTED_DIR_NAME = "A Directory"
DIFFERENT_GROUP_NAME = "Steak"
def create_file(inDirURI):
name = Utils.random_string()+".txt"
uri = Vfs.uri_join(inDirURI.replace("file://",""),name)
f = open(uri,'w')
f.write(Utils.random_string())
f.close()
return name,uri
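# create_file drops a randomly named .txt file with random contents into the
# given file:// directory and returns its (basename, local path) pair; the
# scenarios below use it to seed the folders being synchronised.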
#Test removable volume support
removableUri = get_external_resources('folder')['removable-volume']
ok("Is on a removable volume", FileDataProvider.is_on_removable_volume(removableUri))
#save and restore a group
groupInfo = (removableUri, GROUP_NAME)
ok("Save group info", FileDataProvider.save_removable_volume_group_file(*groupInfo))
readInfo = FileDataProvider.read_removable_volume_group_file(removableUri)
ok("Read group info (%s)" % str(readInfo), len(readInfo) > 0 and readInfo[0][1] == GROUP_NAME)
#create some test directories
dpdir = "file://"+Utils.new_tempdir()
tempdir = "file://"+Utils.new_tempdir()
tempdir2 = Vfs.uri_join(tempdir, NESTED_DIR_NAME)
Vfs.uri_make_directory(tempdir2)
#create some test files
f1Name, f1URI = create_file(tempdir)
f2Name, f2URI = create_file(tempdir2)
#create a test dataprovider
dp = FileDataProvider.FolderTwoWay(
folder=dpdir,
folderGroupName=GROUP_NAME,
includeHidden=False,
compareIgnoreMtime=False,
followSymlinks=False)
# Scenario 1)
# File came from a foreign DP like tomboy. No concept of relative path
# or group. Goes into the folder and keeps its name
plainFile = Utils.new_tempfile("TomboyNote")
plainFileName = plainFile.get_filename()
rid = dp.put(
vfsFile=plainFile,
overwrite=False,
LUID=None)
ok("Put plain file", rid.get_UID() == Vfs.uri_join(dpdir,plainFileName))
# Scenario 2a)
# File came from another folder dp with the same group
# Goes into the folder, keeps its relative path
f1 = File.File(
URI=f1URI,
basepath=tempdir,
group=GROUP_NAME)
rid = dp.put(
vfsFile=f1,
overwrite=False,
LUID=None)
ok("Put same group file", rid.get_UID() == Vfs.uri_join(dpdir,f1Name))
f2 = File.File(
URI=f2URI,
basepath=tempdir,
group=GROUP_NAME)
rid = dp.put(
vfsFile=f2,
overwrite=False,
LUID=None)
ok("Put same group file in nested dir", rid.get_UID() == Vfs.uri_join(dpdir,NESTED_DIR_NAME,f2Name))
# Scenario 2b)
# File came from another folder dp with a different group
# Goes into a new folder, by the name of the group, keeps its relative path
f1 = File.File(
URI=f1URI,
basepath=tempdir,
group=DIFFERENT_GROUP_NAME)
rid = dp.put(
vfsFile=f1,
overwrite=False,
LUID=None)
ok("Put different group file", rid.get_UID() == Vfs.uri_join(dpdir,DIFFERENT_GROUP_NAME,f1Name))
f2 = File.File(
URI=f2URI,
basepath=tempdir,
group=DIFFERENT_GROUP_NAME)
rid = dp.put(
vfsFile=f2,
overwrite=False,
LUID=None)
ok("Put different group file in nested dir", rid.get_UID() == Vfs.uri_join(dpdir,DIFFERENT_GROUP_NAME,NESTED_DIR_NAME,f2Name))
finished()
| GNOME/conduit | test/python-tests/TestDataProviderFolder.py | Python | gpl-2.0 | 3,301 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Fabio Falcinelli, Maximilian Hils
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import sys
from ctypes import byref, c_uint64, c_uint, c_char, c_char_p
from pydivert import windivert_dll
from pydivert.consts import Layer, Direction, Flag
from pydivert.packet import Packet
from pydivert.util import PY2
DEFAULT_PACKET_BUFFER_SIZE = 1500
class WinDivert(object):
"""
A WinDivert handle that can be used to capture packets.
The main methods are `.open()`, `.recv()`, `.send()` and `.close()`.
Use it like so::
with pydivert.WinDivert() as w:
for packet in w:
print(packet)
w.send(packet)
"""
def __init__(self, filter="true", layer=Layer.NETWORK, priority=0, flags=Flag.DEFAULT):
self._handle = None
self._filter = filter.encode()
self._layer = layer
self._priority = priority
self._flags = flags
def __repr__(self):
return '<WinDivert state="{}" filter="{}" layer="{}" priority="{}" flags="{}" />'.format(
"open" if self._handle is not None else "closed",
self._filter.decode(),
self._layer,
self._priority,
self._flags
)
def __enter__(self):
self.open()
return self
def __exit__(self, *args):
self.close()
def __iter__(self):
return self
def __next__(self):
return self.recv()
if sys.version_info < (3, 0):
next = __next__
@staticmethod
def register():
"""
        A utility method to register the service the first time.
It is usually not required to call this function, as WinDivert will register itself when opening a handle.
"""
with WinDivert("false"):
pass
@staticmethod
def is_registered():
"""
Check if the WinDivert service is currently installed on the system.
"""
return subprocess.call("sc query WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
@staticmethod
def unregister():
"""
Unregisters the WinDivert service.
This function only requests a service stop, which may not be processed immediately if there are still open
handles.
"""
subprocess.check_call("sc stop WinDivert1.3", stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@staticmethod
def check_filter(filter, layer=Layer.NETWORK):
"""
Checks if the given packet filter string is valid with respect to the filter language.
The remapped function is WinDivertHelperCheckFilter::
BOOL WinDivertHelperCheckFilter(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__out_opt const char **errorStr,
__out_opt UINT *errorPos
);
See: https://reqrypt.org/windivert-doc.html#divert_helper_check_filter
        :return: A tuple (res, pos, msg) with the check result in 'res', a human-readable description of the error in 'msg', and the error's position in 'pos'.
"""
res, pos, msg = False, c_uint(), c_char_p()
try:
res = windivert_dll.WinDivertHelperCheckFilter(filter.encode(), layer, byref(msg), byref(pos))
except OSError:
pass
return res, pos.value, msg.value.decode()
def open(self):
"""
Opens a WinDivert handle for the given filter.
Unless otherwise specified by flags, any packet that matches the filter will be diverted to the handle.
Diverted packets can be read by the application with receive().
The remapped function is WinDivertOpen::
HANDLE WinDivertOpen(
__in const char *filter,
__in WINDIVERT_LAYER layer,
__in INT16 priority,
__in UINT64 flags
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_open
"""
if self.is_open:
raise RuntimeError("WinDivert handle is already open.")
self._handle = windivert_dll.WinDivertOpen(self._filter, self._layer, self._priority,
self._flags)
@property
def is_open(self):
"""
Indicates if there is currently an open handle.
"""
return bool(self._handle)
def close(self):
"""
Closes the handle opened by open().
The remapped function is WinDivertClose::
BOOL WinDivertClose(
__in HANDLE handle
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_close
"""
if not self.is_open:
raise RuntimeError("WinDivert handle is not open.")
windivert_dll.WinDivertClose(self._handle)
self._handle = None
def recv(self, bufsize=DEFAULT_PACKET_BUFFER_SIZE):
"""
Receives a diverted packet that matched the filter.
The remapped function is WinDivertRecv::
BOOL WinDivertRecv(
__in HANDLE handle,
__out PVOID pPacket,
__in UINT packetLen,
__out_opt PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *recvLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_recv
:return: The return value is a `pydivert.Packet`.
"""
if self._handle is None:
raise RuntimeError("WinDivert handle is not open")
packet = bytearray(bufsize)
packet_ = (c_char * bufsize).from_buffer(packet)
address = windivert_dll.WinDivertAddress()
recv_len = c_uint(0)
windivert_dll.WinDivertRecv(self._handle, packet_, bufsize, byref(address), byref(recv_len))
return Packet(
memoryview(packet)[:recv_len.value],
(address.IfIdx, address.SubIfIdx),
Direction(address.Direction)
)
def send(self, packet, recalculate_checksum=True):
"""
Injects a packet into the network stack.
Recalculates the checksum before sending unless recalculate_checksum=False is passed.
The injected packet may be one received from recv(), or a modified version, or a completely new packet.
Injected packets can be captured and diverted again by other WinDivert handles with lower priorities.
The remapped function is WinDivertSend::
BOOL WinDivertSend(
__in HANDLE handle,
__in PVOID pPacket,
__in UINT packetLen,
__in PWINDIVERT_ADDRESS pAddr,
__out_opt UINT *sendLen
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_send
:return: The return value is the number of bytes actually sent.
"""
if recalculate_checksum:
packet.recalculate_checksums()
send_len = c_uint(0)
if PY2:
# .from_buffer(memoryview) does not work on PY2
buff = bytearray(packet.raw)
else:
buff = packet.raw
buff = (c_char * len(packet.raw)).from_buffer(buff)
windivert_dll.WinDivertSend(self._handle, buff, len(packet.raw), byref(packet.wd_addr),
byref(send_len))
return send_len
def get_param(self, name):
"""
Get a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is WinDivertGetParam::
BOOL WinDivertGetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__out UINT64 *pValue
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_get_param
:return: The parameter value.
"""
value = c_uint64(0)
windivert_dll.WinDivertGetParam(self._handle, name, byref(value))
return value.value
def set_param(self, name, value):
"""
Set a WinDivert parameter. See pydivert.Param for the list of parameters.
The remapped function is DivertSetParam::
BOOL WinDivertSetParam(
__in HANDLE handle,
__in WINDIVERT_PARAM param,
__in UINT64 value
);
For more info on the C call visit: http://reqrypt.org/windivert-doc.html#divert_set_param
"""
return windivert_dll.WinDivertSetParam(self._handle, name, value)
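# A minimal usage sketch, not part of the upstream module: it assumes a
# Windows host with the WinDivert 1.3 driver available, and the filter
# string "tcp.DstPort == 80" is illustrative only.
if __name__ == "__main__":
    with WinDivert("tcp.DstPort == 80") as w:
        for intercepted in w:
            # recv() yielded a matching packet; reinject it unmodified so
            # traffic passes through (send() recalculates checksums).
            w.send(intercepted)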
|
ffalcinelli/pydivert
|
pydivert/windivert.py
|
Python
|
lgpl-3.0
| 9,345
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Bridge from QWeb(Engine)Settings to our own settings."""
from qutebrowser.config import config
from qutebrowser.utils import log, utils, debug, objreg
UNSET = object()
class Base:
"""Base class for QWeb(Engine)Settings wrappers.
Attributes:
_default: The default value of this setting.
"""
# Needs to be overridden by subclasses in
# webkitsettings.py/webenginesettings.py
GLOBAL_SETTINGS = None
def __init__(self):
self._default = UNSET
def _get_settings(self, settings):
"""Get the QWeb(Engine)Settings object to use.
Args:
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
"""
if settings is None:
return self.GLOBAL_SETTINGS
else:
return settings
def save_default(self, settings=None):
"""Save the default value based on the currently set one.
This does nothing if no getter is configured for this setting.
Args:
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
Return:
The saved default value.
"""
try:
self._default = self.get(settings)
return self._default
except AttributeError:
return None
def restore_default(self, settings=None):
"""Restore the default value from the saved one.
This does nothing if the default has never been set.
Args:
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
"""
if self._default is not UNSET:
log.config.vdebug("Restoring default {!r}.".format(self._default))
self._set(self._default, settings=settings)
def get(self, settings=None):
"""Get the value of this setting.
Must be overridden by subclasses.
Args:
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
"""
raise NotImplementedError
def set(self, value, settings=None):
"""Set the value of this setting.
Args:
value: The value to set.
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
"""
if value is None:
self.restore_default(settings)
else:
self._set(value, settings=settings)
def _set(self, value, settings):
"""Inner function to set the value of this setting.
Must be overridden by subclasses.
Args:
value: The value to set.
settings: The QWeb(Engine)Settings instance to use, or None to use
the global instance.
"""
raise NotImplementedError
class Attribute(Base):
"""A setting set via QWeb(Engine)Settings::setAttribute.
Attributes:
self._attribute: A QWeb(Engine)Settings::WebAttribute instance.
"""
ENUM_BASE = None
def __init__(self, attribute):
super().__init__()
self._attribute = attribute
def __repr__(self):
return utils.get_repr(
self, attribute=debug.qenum_key(self.ENUM_BASE, self._attribute),
constructor=True)
def get(self, settings=None):
return self._get_settings(settings).attribute(self._attribute)
def _set(self, value, settings=None):
self._get_settings(settings).setAttribute(self._attribute, value)
class Setter(Base):
"""A setting set via QWeb(Engine)Settings getter/setter methods.
This will pass the QWeb(Engine)Settings instance ("self") as first argument
to the methods, so self._getter/self._setter are the *unbound* methods.
Attributes:
_getter: The unbound QWeb(Engine)Settings method to get this value, or
None.
_setter: The unbound QWeb(Engine)Settings method to set this value.
_args: An iterable of the arguments to pass to the setter/getter
(before the value, for the setter).
_unpack: Whether to unpack args (True) or pass them directly (False).
"""
def __init__(self, getter, setter, args=(), unpack=False):
super().__init__()
self._getter = getter
self._setter = setter
self._args = args
self._unpack = unpack
def __repr__(self):
return utils.get_repr(self, getter=self._getter, setter=self._setter,
args=self._args, unpack=self._unpack,
constructor=True)
def get(self, settings=None):
if self._getter is None:
raise AttributeError("No getter set!")
return self._getter(self._get_settings(settings), *self._args)
def _set(self, value, settings=None):
args = [self._get_settings(settings)]
args.extend(self._args)
if self._unpack:
args.extend(value)
else:
args.append(value)
self._setter(*args)
class NullStringSetter(Setter):
"""A setter for settings requiring a null QString as default.
This overrides save_default so None is saved for an empty string. This is
needed for the CSS media type, because it returns an empty Python string
when getting the value, but setting it to the default requires passing None
(a null QString) instead of an empty string.
"""
def save_default(self, settings=None):
try:
val = self.get(settings)
except AttributeError:
return None
if val == '':
self._set(None, settings=settings)
else:
self._set(val, settings=settings)
return val
class StaticSetter(Setter):
"""A setting set via static QWeb(Engine)Settings getter/setter methods.
self._getter/self._setter are the *bound* methods.
"""
def get(self, settings=None):
if settings is not None:
raise ValueError("'settings' may not be set with GlobalSetters!")
if self._getter is None:
raise AttributeError("No getter set!")
return self._getter(*self._args)
def _set(self, value, settings=None):
if settings is not None:
raise ValueError("'settings' may not be set with GlobalSetters!")
args = list(self._args)
if self._unpack:
args.extend(value)
else:
args.append(value)
self._setter(*args)
def init_mappings(mappings):
"""Initialize all settings based on a settings mapping."""
for sectname, section in mappings.items():
for optname, mapping in section.items():
default = mapping.save_default()
log.config.vdebug("Saved default for {} -> {}: {!r}".format(
sectname, optname, default))
value = config.get(sectname, optname)
log.config.vdebug("Setting {} -> {} to {!r}".format(
sectname, optname, value))
mapping.set(value)
def update_mappings(mappings, section, option):
"""Update global settings when QWeb(Engine)Settings changed."""
try:
mapping = mappings[section][option]
except KeyError:
return
value = config.get(section, option)
mapping.set(value)
def init():
"""Initialize all QWeb(Engine)Settings."""
if objreg.get('args').backend == 'webengine':
# from qutebrowser.browser.webengine import webenginesettings
# webenginesettings.init()
log.stub('with QtWebEngine')
else:
from qutebrowser.browser.webkit import webkitsettings
webkitsettings.init()
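# A hedged sketch (not qutebrowser code) of the mapping shape that
# init_mappings()/update_mappings() consume: {section: {option: wrapper}}.
# QWebSettings and the attribute/getter names below are assumptions for
# illustration; the real mappings live in webkitsettings.py and
# webenginesettings.py.
#
#     from PyQt5.QtWebKit import QWebSettings
#
#     MAPPINGS = {
#         'content': {
#             'allow-images': Attribute(QWebSettings.AutoLoadImages),
#         },
#         'ui': {
#             'default-encoding': Setter(
#                 getter=QWebSettings.defaultTextEncoding,
#                 setter=QWebSettings.setDefaultTextEncoding),
#         },
#     }
#     init_mappings(MAPPINGS)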
|
halfwit/qutebrowser
|
qutebrowser/config/websettings.py
|
Python
|
gpl-3.0
| 8,561
|
from __future__ import print_function, division
from collections import MutableMapping, defaultdict
from sympy.core import (Add, Mul, Pow, Integer, Number, NumberSymbol,)
from sympy.core.numbers import ImaginaryUnit
from sympy.core.sympify import _sympify
from sympy.core.rules import Transform
from sympy.core.logic import fuzzy_or, fuzzy_and
from sympy.matrices.expressions import MatMul
from sympy.functions.elementary.complexes import Abs
from sympy.assumptions.ask import Q
from sympy.assumptions.assume import Predicate, AppliedPredicate
from sympy.logic.boolalg import (Equivalent, Implies, And, Or,
BooleanFunction, Not)
# APIs here may be subject to change
# XXX: Better name?
class UnevaluatedOnFree(BooleanFunction):
"""
Represents a Boolean function that remains unevaluated on free predicates
This is intended to be a superclass of other classes, which define the
behavior on singly applied predicates.
A free predicate is a predicate that is not applied, or a combination
thereof. For example, Q.zero or Or(Q.positive, Q.negative).
A singly applied predicate is a free predicate applied everywhere to a
single expression. For instance, Q.zero(x) and Or(Q.positive(x*y),
Q.negative(x*y)) are singly applied, but Or(Q.positive(x), Q.negative(y))
and Or(Q.positive, Q.negative(y)) are not.
The boolean literals True and False are considered to be both free and
singly applied.
This class raises ValueError unless the input is a free predicate or a
singly applied predicate.
On a free predicate, this class remains unevaluated. On a singly applied
predicate, the method apply() is called and returned, or the original
expression returned if apply() returns None. When apply() is called,
self.expr is set to the unique expression that the predicates are applied
at. self.pred is set to the free form of the predicate.
The typical usage is to create this class with free predicates and
evaluate it using .rcall().
"""
def __new__(cls, arg):
# Mostly type checking here
arg = _sympify(arg)
predicates = arg.atoms(Predicate)
applied_predicates = arg.atoms(AppliedPredicate)
if predicates and applied_predicates:
raise ValueError("arg must be either completely free or singly applied")
if not applied_predicates:
obj = BooleanFunction.__new__(cls, arg)
obj.pred = arg
obj.expr = None
return obj
predicate_args = {pred.args[0] for pred in applied_predicates}
if len(predicate_args) > 1:
raise ValueError("The AppliedPredicates in arg must be applied to a single expression.")
obj = BooleanFunction.__new__(cls, arg)
obj.expr = predicate_args.pop()
obj.pred = arg.xreplace(Transform(lambda e: e.func, lambda e:
isinstance(e, AppliedPredicate)))
applied = obj.apply()
if applied is None:
return obj
return applied
def apply(self):
return
class AllArgs(UnevaluatedOnFree):
"""
Class representing vectorizing a predicate over all the .args of an
expression
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicates with expressions using .rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import AllArgs
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = AllArgs(Q.positive | Q.negative)
>>> a
AllArgs(Q.negative | Q.positive)
>>> a.rcall(x*y)
(Q.negative(x) | Q.positive(x)) & (Q.negative(y) | Q.positive(y))
"""
def apply(self):
return And(*[self.pred.rcall(arg) for arg in self.expr.args])
class AnyArgs(UnevaluatedOnFree):
"""
Class representing vectorizing a predicate over any of the .args of an
expression.
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicates with expressions using .rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import AnyArgs
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = AnyArgs(Q.positive & Q.negative)
>>> a
AnyArgs(Q.negative & Q.positive)
>>> a.rcall(x*y)
(Q.negative(x) & Q.positive(x)) | (Q.negative(y) & Q.positive(y))
"""
def apply(self):
return Or(*[self.pred.rcall(arg) for arg in self.expr.args])
class ExactlyOneArg(UnevaluatedOnFree):
"""
Class representing a predicate holding on exactly one of the .args of an
expression.
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicate with expressions using
.rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import ExactlyOneArg
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = ExactlyOneArg(Q.positive)
>>> a
ExactlyOneArg(Q.positive)
>>> a.rcall(x*y)
(Q.positive(x) & ~Q.positive(y)) | (Q.positive(y) & ~Q.positive(x))
"""
def apply(self):
expr = self.expr
pred = self.pred
pred_args = [pred.rcall(arg) for arg in expr.args]
# Technically this is xor, but if one term in the disjunction is true,
# it is not possible for the remainder to be true, so regular or is
# fine in this case.
return Or(*[And(pred_args[i], *map(Not, pred_args[:i] +
pred_args[i+1:])) for i in range(len(pred_args))])
# Note: this is the equivalent cnf form. The above is more efficient
# as the first argument of an implication, since p >> q is the same as
        # q | ~p, so the ~ will convert the Or to an And, and one just needs
# to distribute the q across it to get to cnf.
# return And(*[Or(*map(Not, c)) for c in combinations(pred_args, 2)]) & Or(*pred_args)
def _old_assump_replacer(obj):
# Things to be careful of:
# - real means real or infinite in the old assumptions.
# - nonzero does not imply real in the old assumptions.
# - finite means finite and not zero in the old assumptions.
if not isinstance(obj, AppliedPredicate):
return obj
e = obj.args[0]
ret = None
if obj.func == Q.positive:
ret = fuzzy_and([e.is_finite, e.is_positive])
if obj.func == Q.zero:
ret = e.is_zero
if obj.func == Q.negative:
ret = fuzzy_and([e.is_finite, e.is_negative])
if obj.func == Q.nonpositive:
ret = fuzzy_and([e.is_finite, e.is_nonpositive])
if obj.func == Q.nonzero:
ret = fuzzy_and([e.is_nonzero, e.is_finite])
if obj.func == Q.nonnegative:
ret = fuzzy_and([fuzzy_or([e.is_zero, e.is_finite]),
e.is_nonnegative])
if obj.func == Q.rational:
ret = e.is_rational
if obj.func == Q.irrational:
ret = e.is_irrational
if obj.func == Q.even:
ret = e.is_even
if obj.func == Q.odd:
ret = e.is_odd
if obj.func == Q.integer:
ret = e.is_integer
if obj.func == Q.imaginary:
ret = e.is_imaginary
if obj.func == Q.commutative:
ret = e.is_commutative
if ret is None:
return obj
return ret
def evaluate_old_assump(pred):
"""
    Replace assumptions on expressions with their values from the old
    assumptions (like Q.negative(-1) => True). Useful because some direct
    computations for numeric objects are defined most conveniently in the old
    assumptions.
"""
return pred.xreplace(Transform(_old_assump_replacer))
class CheckOldAssump(UnevaluatedOnFree):
def apply(self):
return Equivalent(self.args[0], evaluate_old_assump(self.args[0]))
class CheckIsPrime(UnevaluatedOnFree):
def apply(self):
from sympy import isprime
return Equivalent(self.args[0], isprime(self.expr))
class CustomLambda(object):
"""
Interface to lambda with rcall
Workaround until we get a better way to represent certain facts.
"""
def __init__(self, lamda):
self.lamda = lamda
def rcall(self, *args):
return self.lamda(*args)
class ClassFactRegistry(MutableMapping):
"""
Register handlers against classes
``registry[C] = handler`` registers ``handler`` for class
``C``. ``registry[C]`` returns a set of handlers for class ``C``, or any
of its superclasses.
"""
def __init__(self, d=None):
d = d or {}
self.d = defaultdict(frozenset, d)
super(ClassFactRegistry, self).__init__()
def __setitem__(self, key, item):
self.d[key] = frozenset(item)
def __getitem__(self, key):
ret = self.d[key]
for k in self.d:
if issubclass(key, k):
ret |= self.d[k]
return ret
def __delitem__(self, key):
del self.d[key]
def __iter__(self):
return self.d.__iter__()
def __len__(self):
return len(self.d)
def __repr__(self):
return repr(self.d)
fact_registry = ClassFactRegistry()
def register_fact(klass, fact, registry=fact_registry):
registry[klass] |= {fact}
for klass, fact in [
(Mul, Equivalent(Q.zero, AnyArgs(Q.zero))),
(MatMul, Implies(AllArgs(Q.square), Equivalent(Q.invertible, AllArgs(Q.invertible)))),
(Add, Implies(AllArgs(Q.positive), Q.positive)),
(Add, Implies(AllArgs(Q.negative), Q.negative)),
(Mul, Implies(AllArgs(Q.positive), Q.positive)),
(Mul, Implies(AllArgs(Q.commutative), Q.commutative)),
(Mul, Implies(AllArgs(Q.real), Q.commutative)),
(Pow, CustomLambda(lambda power: Implies(Q.real(power.base) &
Q.even(power.exp) & Q.nonnegative(power.exp), Q.nonnegative(power)))),
(Pow, CustomLambda(lambda power: Implies(Q.nonnegative(power.base) & Q.odd(power.exp) & Q.nonnegative(power.exp), Q.nonnegative(power)))),
(Pow, CustomLambda(lambda power: Implies(Q.nonpositive(power.base) & Q.odd(power.exp) & Q.nonnegative(power.exp), Q.nonpositive(power)))),
# This one can still be made easier to read. I think we need basic pattern
# matching, so that we can just write Equivalent(Q.zero(x**y), Q.zero(x) & Q.positive(y))
(Pow, CustomLambda(lambda power: Equivalent(Q.zero(power), Q.zero(power.base) & Q.positive(power.exp)))),
(Integer, CheckIsPrime(Q.prime)),
# Implicitly assumes Mul has more than one arg
    # Would be AllArgs(Q.prime | Q.composite), except 1 is neither prime nor composite
(Mul, Implies(AllArgs(Q.prime), ~Q.prime)),
# More advanced prime assumptions will require inequalities, as 1 provides
# a corner case.
(Mul, Implies(AllArgs(Q.imaginary | Q.real), Implies(ExactlyOneArg(Q.imaginary), Q.imaginary))),
(Mul, Implies(AllArgs(Q.real), Q.real)),
(Add, Implies(AllArgs(Q.real), Q.real)),
    # General case: an odd number of imaginary args implies the Mul is imaginary (to be implemented)
(Mul, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational),
Q.irrational))),
(Add, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational),
Q.irrational))),
(Mul, Implies(AllArgs(Q.rational), Q.rational)),
(Add, Implies(AllArgs(Q.rational), Q.rational)),
(Abs, Q.nonnegative),
(Abs, Equivalent(AllArgs(~Q.zero), ~Q.zero)),
# Including the integer qualification means we don't need to add any facts
# for odd, since the assumptions already know that every integer is
# exactly one of even or odd.
(Mul, Implies(AllArgs(Q.integer), Equivalent(AnyArgs(Q.even), Q.even))),
(Abs, Implies(AllArgs(Q.even), Q.even)),
(Abs, Implies(AllArgs(Q.odd), Q.odd)),
(Add, Implies(AllArgs(Q.integer), Q.integer)),
(Add, Implies(ExactlyOneArg(~Q.integer), ~Q.integer)),
(Mul, Implies(AllArgs(Q.integer), Q.integer)),
(Mul, Implies(ExactlyOneArg(~Q.rational), ~Q.integer)),
(Abs, Implies(AllArgs(Q.integer), Q.integer)),
(Number, CheckOldAssump(Q.negative)),
(Number, CheckOldAssump(Q.zero)),
(Number, CheckOldAssump(Q.positive)),
(Number, CheckOldAssump(Q.nonnegative)),
(Number, CheckOldAssump(Q.nonzero)),
(Number, CheckOldAssump(Q.nonpositive)),
(Number, CheckOldAssump(Q.rational)),
(Number, CheckOldAssump(Q.irrational)),
(Number, CheckOldAssump(Q.even)),
(Number, CheckOldAssump(Q.odd)),
(Number, CheckOldAssump(Q.integer)),
(Number, CheckOldAssump(Q.imaginary)),
# For some reason NumberSymbol does not subclass Number
(NumberSymbol, CheckOldAssump(Q.negative)),
(NumberSymbol, CheckOldAssump(Q.zero)),
(NumberSymbol, CheckOldAssump(Q.positive)),
(NumberSymbol, CheckOldAssump(Q.nonnegative)),
(NumberSymbol, CheckOldAssump(Q.nonzero)),
(NumberSymbol, CheckOldAssump(Q.nonpositive)),
(NumberSymbol, CheckOldAssump(Q.rational)),
(NumberSymbol, CheckOldAssump(Q.irrational)),
(NumberSymbol, CheckOldAssump(Q.imaginary)),
(ImaginaryUnit, CheckOldAssump(Q.negative)),
(ImaginaryUnit, CheckOldAssump(Q.zero)),
(ImaginaryUnit, CheckOldAssump(Q.positive)),
(ImaginaryUnit, CheckOldAssump(Q.nonnegative)),
(ImaginaryUnit, CheckOldAssump(Q.nonzero)),
(ImaginaryUnit, CheckOldAssump(Q.nonpositive)),
(ImaginaryUnit, CheckOldAssump(Q.rational)),
(ImaginaryUnit, CheckOldAssump(Q.irrational)),
(ImaginaryUnit, CheckOldAssump(Q.imaginary))
]:
register_fact(klass, fact)
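# A hedged extension sketch: register_fact() above just unions a new fact
# into fact_registry for a class, so a custom rule could be added the same
# way. The rule below (Abs of an all-rational expression is rational) is an
# illustrative example, not an upstream fact:
#
#     register_fact(Abs, Implies(AllArgs(Q.rational), Q.rational))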
|
NikNitro/Python-iBeacon-Scan
|
sympy/assumptions/sathandlers.py
|
Python
|
gpl-3.0
| 13,472
|
import unittest
import mox
from layers import *
####
# Test Data
####
TEST_LAYER_PROPERTIES = dict((str(k), v) for k, v in {
    u'division_lod_min': None, u'compressed': True, u'busy': None,
    u'description': None, u'dynamic_balloons': False, u'auto_managed': False,
    u'division_lod_min_fade': None, u'division_lod_max_fade': None,
    u'division_lod_max': None, u'item_type': None, u'custom_kml': None,
    u'world': u'mars', u'uncacheable': False, u'icon': None, u'baked': None,
    u'division_size': None, u'contents': [], u'name': u'test_layer'}.items())
TEST_LAYER_NAME = TEST_LAYER_PROPERTIES['name']
TEST_LAYER_ID = 1
LOREM_IPSUM = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec laoreet aliquet eleifend."
####
mock_LayersManagerClient = None  # The layers module retains a persistent client, so the tests must as well
class LayerTest(unittest.TestCase):
def setUp(self):
global mock_LayersManagerClient
self.mox = mox.Mox()
# Replace the mock_LayersManagerClient constructor with a stub-yielding method
if not mock_LayersManagerClient:
mock_LayersManagerClient = self.mox.CreateMock(lmc.LayersManagerClient)
else:
self.mox._mock_objects.append(mock_LayersManagerClient) # so ReplayAll / ResetAll will still work
self.mock_LayersManagerClient = mock_LayersManagerClient
def getLmc(*args): return self.mock_LayersManagerClient
self.mox.stubs.Set(lmc, 'LayersManagerClient', getLmc)
def tearDown(self):
self.mox.VerifyAll()
self.mox.ResetAll()
self.mox.UnsetStubs()
lmc._cms_client = None
def test_get_default_client(self):
self.mox.ReplayAll()
client = get_default_client()
self.assert_(isinstance(client, mox.MockObject))
self.assert_(hasattr(client, 'Create'))
self.assertEqual(get_default_client(), client) # make sure repeat calls yield the same object
def test_create_layer(self):
# TODO: test adding icons in create / update cases
self.mock_LayersManagerClient.Create('layer', 0, name=TEST_LAYER_NAME, world='mars').AndReturn(TEST_LAYER_ID)
self.mox.ReplayAll()
layer = Layer(name='test_layer')
self.assertRaises(MissingPropertyError, layer.save) # world is a required argument
layer = Layer(name='test_layer', world='mars')
layer.save()
self.assertEquals(layer.id, TEST_LAYER_ID)
self.assertEquals( layer.layer_id, TEST_LAYER_ID)
def test_create_layer_with_icon(self):
# TODO: test adding icons in create / update cases
icon_url = 'http://somedomain.net/path/to/icon.png'
icon_id = 3
self.mock_LayersManagerClient.Create('layer', 0, name=TEST_LAYER_NAME, world='mars', icon=icon_url).AndReturn(TEST_LAYER_ID)
self.mox.ReplayAll()
layer = Layer(name='test_layer', world='mars', icon=icon_url)
layer.save()
self.assertEquals(layer.id, TEST_LAYER_ID)
self.assertEquals( layer.layer_id, TEST_LAYER_ID)
def _test_lmc_tests(self): # this test is disabled
"""
This is just to make sure the mock LayersManagerClient constructor
always yields the same object.
"""
a = lmc.LayersManagerClient('fee','fi','fo')
self.assertEqual(a, self.mock_LayersManagerClient)
a = lmc.LayersManagerClient('fee','fi','fo')
self.assertEqual(a, self.mock_LayersManagerClient)
def test_properties(self):
self.mock_LayersManagerClient.Create('layer', 0, name=TEST_LAYER_NAME, world='mars').AndReturn(TEST_LAYER_ID)
self.mock_LayersManagerClient.Query('layer', TEST_LAYER_ID, TEST_LAYER_ID).AndReturn(TEST_LAYER_PROPERTIES)
newprops = TEST_LAYER_PROPERTIES.copy()
newprops.update({'description': LOREM_IPSUM})
self.mock_LayersManagerClient.Update('layer', 1, **newprops)
self.mox.ReplayAll()
layer = Layer(name='test_layer', world='mars')
self.assertEqual(layer.dynamic_balloons, None) # Trying to access an unset property before the layer is saved.
layer.save()
mox.Replay(self.mock_LayersManagerClient)
# RETRIEVE PROPERTIES
self.assertEqual(layer.dynamic_balloons, False) # Should have the side effect of loading the properties from the cms
self.assertEqual(layer._properties, TEST_LAYER_PROPERTIES)
# UPDATE PROPERTIES
layer.description = LOREM_IPSUM
self.assertEqual(layer.description, LOREM_IPSUM)
layer.save()
self.assertEqual(layer.description, LOREM_IPSUM)
if __name__ == '__main__':
unittest.main()
|
deleted/kml-layer-manager
|
client/layers_test.py
|
Python
|
apache-2.0
| 4,670
|
import atexit
import logging
import logging.handlers
from threading import Lock
import inspect
import sys
from django.db.models import Model
from types import ModuleType
logger_cache = {}
logger_cache_lock = Lock()
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class LogWrapperForObject(object):
"""This class masks the logger in order to print out the id of the object to identify it.
Args:
logger: The logger to mask
o: The object that the logger was made for
id: If the object is not likely to provide ``pk`` on its own, then pass something here
"""
WRAPPED_FIELDS = ["trace", "debug", "info", "warning", "error", "exception"]
def __init__(self, logger, o, id=None):
self._logger = logger
self._o = o
self._id = id
def __getattr__(self, attr):
if attr not in self.WRAPPED_FIELDS:
raise AttributeError("Could not find {}".format(attr))
result = getattr(self._logger, attr)
def _log(s, *args, **kwargs):
if self._id is not None:
s = "[{}] {}".format(self._id, s)
elif hasattr(self._o, "pk"):
s = "[{}] {}".format(self._o.pk, s)
else:
s = "[{}] {}".format(str(self._o), s)
return result(s, *args, **kwargs)
return _log
def create_logger(o, additional_id=None):
"""Creates a logger that has its filename derived from the passed object's properties.
The logger is targeted at the logserver.
Args:
o: Object to create logger for.
additional_id: If the object does not provide ``pk``, then you can pass this parameter.
Returns:
Instance of logger.
"""
wrap = None
if isinstance(o, str):
if o in sys.modules:
# str -> module
return create_logger(sys.modules[o], additional_id)
logger_name = o
elif isinstance(o, ModuleType):
module_name = o.__name__
logger_name = module_name
if additional_id is not None:
wrap = LogWrapperForObject
else:
module_name = o.__module__
try:
o_name = o.__name__
except AttributeError:
o_name = type(o).__name__
logger_name = "{}.{}".format(module_name, o_name)
if isinstance(o, Model) or (inspect.isclass(o) and issubclass(o, Model)):
wrap = LogWrapperForObject
with logger_cache_lock:
if None not in logger_cache:
logger = logging.getLogger()
logger.setLevel(logging.INFO)
socket_handler = logging.handlers.SocketHandler(
"localhost", logging.handlers.DEFAULT_TCP_LOGGING_PORT)
atexit.register(socket_handler.close)
logger.addHandler(socket_handler)
logger_cache[None] = logger
if logger_name not in logger_cache:
logger_cache[logger_name] = logging.getLogger(logger_name)
result = logger_cache[logger_name]
if wrap:
return wrap(result, o, additional_id)
else:
return result
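# A minimal usage sketch, illustrative rather than part of the module:
# strings and modules yield plain loggers, while objects with a ``pk`` (or
# an explicit ``additional_id``) get it prefixed to every message through
# LogWrapperForObject. The id "worker-1" is an assumption for the example.
if __name__ == "__main__":
    plain = create_logger(__name__)
    plain.info("sent to the log server unprefixed")
    tagged = create_logger(sys.modules[__name__], additional_id="worker-1")
    tagged.info("rendered with a '[worker-1] ' prefix")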
|
nachandr/cfme_tests
|
sprout/sprout/log.py
|
Python
|
gpl-2.0
| 3,154
|
"""
Tests for StaticContentServer
"""
import copy
import datetime
import ddt
import logging
import unittest
from uuid import uuid4
from django.conf import settings
from django.test import RequestFactory
from django.test.client import Client
from django.test.utils import override_settings
from mock import patch
from xmodule.contentstore.django import contentstore
from xmodule.contentstore.content import StaticContent
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.assetstore.assetmgr import AssetManager
from opaque_keys import InvalidKeyError
from xmodule.modulestore.exceptions import ItemNotFoundError
from contentserver.middleware import parse_range_header, HTTP_DATE_FORMAT, StaticContentServer
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, AdminFactory
log = logging.getLogger(__name__)
TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE)
TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex
TEST_DATA_DIR = settings.COMMON_TEST_DATA_ROOT
FAKE_MD5_HASH = 'ffffffffffffffffffffffffffffffff'
def get_versioned_asset_url(asset_path):
"""
Creates a versioned asset URL.
"""
try:
locator = StaticContent.get_location_from_path(asset_path)
content = AssetManager.find(locator, as_stream=True)
return StaticContent.add_version_to_asset_path(asset_path, content.content_digest)
except (InvalidKeyError, ItemNotFoundError):
pass
return asset_path
@ddt.ddt
@override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE)
class ContentStoreToyCourseTest(SharedModuleStoreTestCase):
"""
Tests that use the toy course.
"""
@classmethod
def setUpClass(cls):
super(ContentStoreToyCourseTest, cls).setUpClass()
cls.contentstore = contentstore()
cls.modulestore = modulestore()
cls.course_key = cls.modulestore.make_course_key('edX', 'toy', '2012_Fall')
import_course_from_xml(
cls.modulestore, 1, TEST_DATA_DIR, ['toy'],
static_content_store=cls.contentstore, verbose=True
)
# A locked asset
cls.locked_asset = cls.course_key.make_asset_key('asset', 'sample_static.html')
cls.url_locked = unicode(cls.locked_asset)
cls.url_locked_versioned = get_versioned_asset_url(cls.url_locked)
cls.contentstore.set_attr(cls.locked_asset, 'locked', True)
# An unlocked asset
cls.unlocked_asset = cls.course_key.make_asset_key('asset', 'another_static.txt')
cls.url_unlocked = unicode(cls.unlocked_asset)
cls.url_unlocked_versioned = get_versioned_asset_url(cls.url_unlocked)
cls.length_unlocked = cls.contentstore.get_attr(cls.unlocked_asset, 'length')
def setUp(self):
"""
Create user and login.
"""
super(ContentStoreToyCourseTest, self).setUp()
self.staff_usr = AdminFactory.create()
self.non_staff_usr = UserFactory.create()
self.client = Client()
def test_unlocked_asset(self):
"""
Test that unlocked assets are being served.
"""
self.client.logout()
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
def test_unlocked_versioned_asset(self):
"""
Test that unlocked assets that are versioned are being served.
"""
self.client.logout()
resp = self.client.get(self.url_unlocked_versioned)
self.assertEqual(resp.status_code, 200)
def test_unlocked_versioned_asset_with_nonexistent_version(self):
"""
Test that unlocked assets that are versioned, but have a nonexistent version,
are sent back as a 301 redirect which tells the caller the correct URL.
"""
url_unlocked_versioned_old = StaticContent.add_version_to_asset_path(self.url_unlocked, FAKE_MD5_HASH)
self.client.logout()
resp = self.client.get(url_unlocked_versioned_old)
self.assertEqual(resp.status_code, 301)
self.assertTrue(resp.url.endswith(self.url_unlocked_versioned)) # pylint: disable=no-member
def test_locked_versioned_asset(self):
"""
Test that locked assets that are versioned are being served.
"""
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password='test')
resp = self.client.get(self.url_locked_versioned)
self.assertEqual(resp.status_code, 200)
def test_locked_asset_not_logged_in(self):
"""
Test that locked assets behave appropriately in case the user is not
logged in.
"""
self.client.logout()
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 403)
def test_locked_asset_not_registered(self):
"""
        Test that locked assets behave appropriately in case the user is
        logged in but not registered for the course.
"""
self.client.login(username=self.non_staff_usr, password='test')
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 403)
def test_locked_asset_registered(self):
"""
        Test that locked assets behave appropriately in case the user is logged in
and registered for the course.
"""
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password='test')
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
def test_locked_asset_staff(self):
"""
        Test that locked assets behave appropriately in case the user is staff.
"""
self.client.login(username=self.staff_usr, password='test')
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
def test_range_request_full_file(self):
"""
Test that a range request from byte 0 to last,
outputs partial content status code and valid Content-Range and Content-Length.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes=0-')
self.assertEqual(resp.status_code, 206) # HTTP_206_PARTIAL_CONTENT
self.assertEqual(
resp['Content-Range'],
'bytes {first}-{last}/{length}'.format(
first=0, last=self.length_unlocked - 1,
length=self.length_unlocked
)
)
self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
def test_range_request_partial_file(self):
"""
Test that a range request for a partial file,
outputs partial content status code and valid Content-Range and Content-Length.
        first_byte and last_byte are chosen to be simple but non-trivial values.
"""
first_byte = self.length_unlocked / 4
last_byte = self.length_unlocked / 2
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=first_byte, last=last_byte))
self.assertEqual(resp.status_code, 206) # HTTP_206_PARTIAL_CONTENT
self.assertEqual(resp['Content-Range'], 'bytes {first}-{last}/{length}'.format(
first=first_byte, last=last_byte, length=self.length_unlocked))
self.assertEqual(resp['Content-Length'], str(last_byte - first_byte + 1))
def test_range_request_multiple_ranges(self):
"""
Test that multiple ranges in request outputs the full content.
"""
first_byte = self.length_unlocked / 4
last_byte = self.length_unlocked / 2
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}, -100'.format(
first=first_byte, last=last_byte))
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Content-Range', resp)
self.assertEqual(resp['Content-Length'], str(self.length_unlocked))
@ddt.data(
'bytes 0-',
'bits=0-',
'bytes=0',
'bytes=one-',
)
def test_syntax_errors_in_range(self, header_value):
"""
Test that syntactically invalid Range values result in a 200 OK full content response.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE=header_value)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Content-Range', resp)
def test_range_request_malformed_invalid_range(self):
"""
Test that a range request with malformed Range (first_byte > last_byte) outputs
416 Requested Range Not Satisfiable.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=(self.length_unlocked / 2), last=(self.length_unlocked / 4)))
self.assertEqual(resp.status_code, 416)
def test_range_request_malformed_out_of_bounds(self):
"""
        Test that a range request with a malformed Range (first_byte and last_byte equal to the total length, an off-by-one error)
outputs 416 Requested Range Not Satisfiable.
"""
resp = self.client.get(self.url_unlocked, HTTP_RANGE='bytes={first}-{last}'.format(
first=(self.length_unlocked), last=(self.length_unlocked)))
self.assertEqual(resp.status_code, 416)
def test_vary_header_sent(self):
"""
Tests that we're properly setting the Vary header to ensure browser requests don't get
cached in a way that breaks XHR requests to the same asset.
"""
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertEquals('Origin', resp['Vary'])
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_with_ttl_unlocked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is set, an unlocked asset will be sent back with
the correct cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 10
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertIn('Expires', resp)
self.assertEquals('public, max-age=10, s-maxage=10', resp['Cache-Control'])
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_with_ttl_locked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is set, a locked asset will be sent back without
any cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 10
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password='test')
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertEquals('private, no-cache, no-store', resp['Cache-Control'])
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_without_ttl_unlocked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is not set, an unlocked asset will be sent back without
any cache control/expires headers.
"""
mock_get_cache_ttl.return_value = 0
resp = self.client.get(self.url_unlocked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertNotIn('Cache-Control', resp)
@patch('contentserver.models.CourseAssetCacheTtlConfig.get_cache_ttl')
def test_cache_headers_without_ttl_locked(self, mock_get_cache_ttl):
"""
Tests that when a cache TTL is not set, a locked asset will be sent back with a
cache-control header that indicates this asset should not be cached.
"""
mock_get_cache_ttl.return_value = 0
CourseEnrollment.enroll(self.non_staff_usr, self.course_key)
self.assertTrue(CourseEnrollment.is_enrolled(self.non_staff_usr, self.course_key))
self.client.login(username=self.non_staff_usr, password='test')
resp = self.client.get(self.url_locked)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('Expires', resp)
self.assertEquals('private, no-cache, no-store', resp['Cache-Control'])
def test_get_expiration_value(self):
start_dt = datetime.datetime.strptime("Thu, 01 Dec 1983 20:00:00 GMT", HTTP_DATE_FORMAT)
near_expire_dt = StaticContentServer.get_expiration_value(start_dt, 55)
self.assertEqual("Thu, 01 Dec 1983 20:00:55 GMT", near_expire_dt)
@patch('contentserver.models.CdnUserAgentsConfig.get_cdn_user_agents')
def test_cache_is_cdn_with_normal_request(self, mock_get_cdn_user_agents):
"""
Tests that when a normal request is made -- i.e. from an end user with their
browser -- that we don't classify the request as coming from a CDN.
"""
mock_get_cdn_user_agents.return_value = 'Amazon CloudFront'
request_factory = RequestFactory()
browser_request = request_factory.get('/fake', HTTP_USER_AGENT='Chrome 1234')
is_from_cdn = StaticContentServer.is_cdn_request(browser_request)
self.assertEqual(is_from_cdn, False)
@patch('contentserver.models.CdnUserAgentsConfig.get_cdn_user_agents')
def test_cache_is_cdn_with_cdn_request(self, mock_get_cdn_user_agents):
"""
Tests that when a CDN request is made -- i.e. from an edge node back to the
origin -- that we classify the request as coming from a CDN.
"""
mock_get_cdn_user_agents.return_value = 'Amazon CloudFront'
request_factory = RequestFactory()
browser_request = request_factory.get('/fake', HTTP_USER_AGENT='Amazon CloudFront')
is_from_cdn = StaticContentServer.is_cdn_request(browser_request)
self.assertEqual(is_from_cdn, True)
@patch('contentserver.models.CdnUserAgentsConfig.get_cdn_user_agents')
def test_cache_is_cdn_with_cdn_request_multiple_user_agents(self, mock_get_cdn_user_agents):
"""
Tests that when a CDN request is made -- i.e. from an edge node back to the
origin -- that we classify the request as coming from a CDN when multiple UAs
are configured.
"""
mock_get_cdn_user_agents.return_value = 'Amazon CloudFront\nAkamai GHost'
request_factory = RequestFactory()
browser_request = request_factory.get('/fake', HTTP_USER_AGENT='Amazon CloudFront')
is_from_cdn = StaticContentServer.is_cdn_request(browser_request)
self.assertEqual(is_from_cdn, True)
@ddt.ddt
class ParseRangeHeaderTestCase(unittest.TestCase):
"""
Tests for the parse_range_header function.
"""
def setUp(self):
super(ParseRangeHeaderTestCase, self).setUp()
self.content_length = 10000
def test_bytes_unit(self):
unit, __ = parse_range_header('bytes=100-', self.content_length)
self.assertEqual(unit, 'bytes')
@ddt.data(
('bytes=100-', 1, [(100, 9999)]),
('bytes=1000-', 1, [(1000, 9999)]),
('bytes=100-199, 200-', 2, [(100, 199), (200, 9999)]),
('bytes=100-199, 200-499', 2, [(100, 199), (200, 499)]),
('bytes=-100', 1, [(9900, 9999)]),
('bytes=-100, -200', 2, [(9900, 9999), (9800, 9999)])
)
@ddt.unpack
    def test_valid_syntax(self, header_value, expected_ranges_length, expected_ranges):
        __, ranges = parse_range_header(header_value, self.content_length)
        self.assertEqual(len(ranges), expected_ranges_length)
self.assertEqual(ranges, expected_ranges)
@ddt.data(
('bytes=one-20', ValueError, 'invalid literal for int()'),
('bytes=-one', ValueError, 'invalid literal for int()'),
('bytes=-', ValueError, 'invalid literal for int()'),
('bytes=--', ValueError, 'invalid literal for int()'),
('bytes', ValueError, 'Invalid syntax'),
('bytes=', ValueError, 'Invalid syntax'),
('bytes=0', ValueError, 'Invalid syntax'),
('bytes=0-10,0', ValueError, 'Invalid syntax'),
('bytes=0=', ValueError, 'too many values to unpack'),
)
@ddt.unpack
def test_invalid_syntax(self, header_value, exception_class, exception_message_regex):
self.assertRaisesRegexp(
exception_class, exception_message_regex, parse_range_header, header_value, self.content_length
)
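# An illustrative call shape for parse_range_header, consistent with the ddt
# cases above (the concrete values here are assumed, not from the suite): it
# returns a (unit, ranges) pair with inclusive byte offsets resolved against
# the content length, e.g.
#     parse_range_header('bytes=0-499', 10000)  ->  ('bytes', [(0, 499)])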
|
shabab12/edx-platform
|
common/djangoapps/contentserver/test/test_contentserver.py
|
Python
|
agpl-3.0
| 16,959
|
# Source and destination file names.
test_source = "data/math.txt"
test_destination = "math_output_latex.html"
# Keyword parameters passed to publish_file.
reader_name = "standalone"
parser_name = "rst"
writer_name = "html"
# Settings
settings_overrides['math_output'] = 'latex'
# local copy of default stylesheet:
settings_overrides['stylesheet_path'] = (
'functional/input/data/html4css1.css')
|
waseem18/oh-mainline
|
vendor/packages/docutils/test/functional/tests/math_output_latex.py
|
Python
|
agpl-3.0
| 405
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import page_cycler_story
from telemetry.page import cache_temperature as cache_temperature_module
from telemetry.page import shared_page_state
from telemetry import story
class IntlArFaHePage(page_cycler_story.PageCyclerStory):
def __init__(self, url, page_set, cache_temperature=None):
if cache_temperature == cache_temperature_module.COLD:
temp_suffix = '_cold'
elif cache_temperature == cache_temperature_module.WARM:
temp_suffix = '_warm'
else:
raise NotImplementedError
super(IntlArFaHePage, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState,
cache_temperature=cache_temperature,
name=url + temp_suffix)
class IntlArFaHePageSet(story.StorySet):
""" Popular pages in right-to-left languages Arabic, Farsi and Hebrew. """
def __init__(self, cache_temperatures=(cache_temperature_module.COLD,
cache_temperature_module.WARM)):
super(IntlArFaHePageSet, self).__init__(
archive_data_file='data/intl_ar_fa_he.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
if cache_temperatures is None:
cache_temperatures = [cache_temperature_module.ANY]
urls_list = [
'http://msn.co.il/',
'http://ynet.co.il/',
'http://www.islamweb.net/',
'http://farsnews.com/',
'http://www.masrawy.com/',
'http://www.startimes.com/f.aspx',
'http://www.aljayyash.net/',
'http://www.google.com.sa/'
]
for url in urls_list:
for temp in cache_temperatures:
self.AddStory(IntlArFaHePage(url, self, cache_temperature=temp))
|
scheib/chromium
|
tools/perf/page_sets/intl_ar_fa_he.py
|
Python
|
bsd-3-clause
| 1,849
|
from flask import request
from werkzeug.exceptions import NotFound
from rdr_service.api.base_api import BaseApi
from rdr_service.api_util import PTC_AND_HEALTHPRO
from rdr_service.app_util import auth_required
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.model.site_enums import ObsoleteStatus
class AwardeeApi(BaseApi):
def __init__(self):
super(AwardeeApi, self).__init__(HPODao(), get_returns_children=True)
@auth_required(PTC_AND_HEALTHPRO)
def get(self, a_id=None):
self.dao.obsolete_filters = self._get_obsolete_filters()
if a_id:
hpo = self.dao.get_by_name(a_id)
if not hpo:
raise NotFound(f"Awardee with ID {a_id} not found")
return self._make_response(self.dao.get_with_children(hpo.hpoId))
return super(AwardeeApi, self)._query(id_field="id")
def _make_resource_url(self, json, id_field, participant_id): # pylint: disable=unused-argument
from rdr_service import main
return main.api.url_for(self.__class__, a_id=json[id_field], _external=True)
def _make_response(self, obj):
inactive = request.args.get("_inactive")
return self.dao.to_client_json(obj,
inactive_sites=inactive,
obsolete_filters=self._get_obsolete_filters())
def _get_obsolete_filters(self):
obsolete_param = request.args.get("_obsolete")
obsolete_filters = [None,
ObsoleteStatus.ACTIVE,
ObsoleteStatus.OBSOLETE]
if obsolete_param is not None and obsolete_param.lower() == 'false':
obsolete_filters = obsolete_filters[:-1]
return obsolete_filters
|
all-of-us/raw-data-repository
|
rdr_service/api/awardee_api.py
|
Python
|
bsd-3-clause
| 1,765
|
#!/usr/bin/env python
from paddle.trainer_config_helpers import *
height = 224
width = 224
num_class = 1000
batch_size = get_config_arg('batch_size', int, 64)
layer_num = get_config_arg('layer_num', int, 19)
is_infer = get_config_arg("is_infer", bool, False)
num_samples = get_config_arg('num_samples', int, 2560)
args = {
'height': height,
'width': width,
'color': True,
'num_class': num_class,
'is_infer': is_infer,
'num_samples': num_samples
}
define_py_data_sources2(
"train.list" if not is_infer else None,
"test.list" if is_infer else None,
module="provider",
obj="process",
args=args)
settings(
batch_size=batch_size,
learning_rate=0.001 / batch_size,
learning_method=MomentumOptimizer(0.9),
regularization=L2Regularization(0.0005 * batch_size))
img = data_layer(name='image', size=height * width * 3)
def vgg_network(vgg_num=3):
tmp = img_conv_group(
input=img,
num_channels=3,
conv_padding=1,
conv_num_filter=[64, 64],
conv_filter_size=3,
conv_act=ReluActivation(),
pool_size=2,
pool_stride=2,
pool_type=MaxPooling())
tmp = img_conv_group(
input=tmp,
conv_num_filter=[128, 128],
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
channels = []
for i in range(vgg_num):
channels.append(256)
tmp = img_conv_group(
input=tmp,
conv_num_filter=channels,
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
channels = []
for i in range(vgg_num):
channels.append(512)
tmp = img_conv_group(
input=tmp,
conv_num_filter=channels,
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = img_conv_group(
input=tmp,
conv_num_filter=channels,
conv_padding=1,
conv_filter_size=3,
conv_act=ReluActivation(),
pool_stride=2,
pool_type=MaxPooling(),
pool_size=2)
tmp = fc_layer(
input=tmp,
size=4096,
act=ReluActivation(),
layer_attr=ExtraAttr(drop_rate=0.5))
tmp = fc_layer(
input=tmp,
size=4096,
act=ReluActivation(),
layer_attr=ExtraAttr(drop_rate=0.5))
return fc_layer(input=tmp, size=num_class, act=SoftmaxActivation())
if layer_num == 16:
vgg = vgg_network(3)
elif layer_num == 19:
vgg = vgg_network(4)
else:
    raise ValueError("Unsupported layer_num: %s, expected 16 or 19" % layer_num)
if is_infer:
outputs(vgg)
else:
lab = data_layer('label', num_class)
loss = cross_entropy(input=vgg, label=lab)
outputs(loss)
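# A hedged usage note, not part of the benchmark script: get_config_arg()
# reads values passed on the legacy Paddle v1 command line, so batch size and
# network depth were selected with something like
#     paddle train --config=vgg.py --config_args=batch_size=128,layer_num=16
# The exact flag spelling is an assumption about the v1-era CLI.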
|
reyoung/Paddle
|
benchmark/paddle/image/vgg.py
|
Python
|
apache-2.0
| 2,910
|
'''
Created on 6 jan. 2013
@author: sander
'''
from abc import ABCMeta, abstractmethod
from bitstring import ConstBitStream, Bits
from pylisp.packet.ip.protocol import ProtocolElement
class ControlMessage(ProtocolElement):
'''
This is the abstract base class for all LISP control packets
'''
__metaclass__ = ABCMeta
# Class property: which message type do we represent?
message_type = None
@abstractmethod
def __init__(self):
'''
Constructor
'''
@abstractmethod
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
@classmethod
@abstractmethod
def from_bytes(cls, bitstream):
'''
Look at the type of the message, instantiate the correct class and
let it parse the message.
'''
from pylisp.packet.lisp.control import type_registry
# Convert to ConstBitStream (if not already provided)
if not isinstance(bitstream, ConstBitStream):
if isinstance(bitstream, Bits):
bitstream = ConstBitStream(auto=bitstream)
else:
bitstream = ConstBitStream(bytes=bitstream)
# Peek at the bitstream to see which type it is
type_nr = bitstream.peek('uint:4')
# Look for the right class
type_class = type_registry.get_type_class(type_nr)
if not type_class:
raise ValueError("Can't handle message type {0}".format(type_nr))
# Let the specific class handle it from now on
return type_class.from_bytes(bitstream)
@abstractmethod
def to_bytes(self):
'''
Create bytes from properties
'''
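# A hedged sketch (not upstream pylisp) of how a concrete message type plugs
# into the from_bytes() dispatch above: a subclass sets ``message_type``,
# implements the abstract methods, and registers itself. The type number 9
# and the registration helper's name are assumptions for illustration.
#
#     from pylisp.packet.lisp.control import type_registry
#
#     class ExampleMessage(ControlMessage):
#         message_type = 9
#
#         def __init__(self, payload=b''):
#             self.payload = payload
#
#         def sanitize(self):
#             pass
#
#         @classmethod
#         def from_bytes(cls, bitstream):
#             ...  # parse the remaining bits into an ExampleMessage
#
#         def to_bytes(self):
#             ...  # serialize; the first 4 bits must encode message_type
#
#     type_registry.register_type_class(ExampleMessage)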
|
steffann/pylisp
|
pylisp/packet/lisp/control/base.py
|
Python
|
bsd-3-clause
| 1,763
|