code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*- coding: utf-8 *-*
import getopt
import sys
import spo_rev
def main(argv):
    """Parse command-line arguments and launch the Spotify downloader.

    argv: list of raw CLI arguments (normally ``sys.argv[1:]``).
    Exits via ``sys.exit()`` after printing usage on bad/absent options.
    """
    try:
        opts, args = getopt.getopt(
            argv, 'hl:u:e:c:',
            ['help', 'uri=', 'log_type=', 'error=', 'folder='])
    except getopt.GetoptError:
        usage()
        sys.exit()
    # Default values
    log_type = 1
    error = 0.1
    folder = ''
    uri = ''
    if len(opts) > 0:
        for opt, arg in opts:
            if opt in ('-l', '--log_type'):
                log_type = int(arg)
            elif opt in ('-e', '--error'):
                error = float(arg)
            elif opt in ('-h', '--help'):
                usage()
                sys.exit()
            elif opt in ('-u', '--uri'):
                uri = arg
            elif opt in ('-c', '--folder'):
                # BUG FIX: was `opt in '-c'` -- a substring test that never
                # matched the '--folder' long option (and would match any
                # stray '-' or 'c').
                folder = arg
            else:
                usage()
                sys.exit()
        s = spo_rev.Spotify(raw_uri=uri, log_type=log_type, error=error,
                            folder=folder)
        s.main()
    else:
        usage()
        sys.exit()
def usage():
    """Print the CLI help text to stdout."""
    help_lines = (
        "usage: main.py [-h] [-l] [-u] <playlist/song/album/artist uri> [-e] <error> [-c] <folder> \n",
        "-h Shows help.",
        "\nUsage:",
        "-u Spotify playlist/song/album/artist uri.",
        "\nOptions:",
        "-l Log type (0 - 1). Shows download info (1) or not (0).",
        "-e Error rate (0 - 1). Error at searching songs.",
        "-c Folder name in which songs will be stored.",
    )
    for help_line in help_lines:
        print(help_line)
# Script entry point: hand the raw CLI arguments to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| r3v1/Spotify-tracks-dl | src/main.py | Python | gpl-3.0 | 1,550 |
import os
import sys
from .core import *
from metatools.apps.runtime import initialize, poll_event_loop, run_event_loop
def main_bundle():
    """App-bundle entry point: tee stdout/stderr into a log file under
    /tmp, then run main()."""
    logfile = open('/tmp/%s.log' % __name__, 'a')
    logfile.write('==========\n')

    class _Tee(object):
        # Fan every write out to all handles, flushing eagerly so the
        # log stays complete even if the process dies abruptly.
        def __init__(self, fhs):
            self.fhs = fhs

        def write(self, data):
            for handle in self.fhs:
                handle.write(data)
                handle.flush()

    sys.stdout = _Tee((logfile, sys.stdout))
    sys.stderr = _Tee((logfile, sys.stderr))
    main()
# Target of the notification app.
def noop():
    """Do nothing; exists only as a callable target."""
    return None
def main():
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument('--loop', action='store_true')
parser.add_argument('--qt', action='store_true')
args = parser.parse_args()
if IS_MACOS:
initialize(standalone=not (args.loop or args.qt))
if args.qt:
from ..qt import Q
qt_app = Q.Application([])
button = Q.PushButton("HERE")
@button.clicked.connect
def on_button_clicked():
Notification('Qt', 'You pressed the button in QT').send()
button.show()
qt_timer = Q.Timer()
qt_timer.setInterval(0)
# Need to keep this around (for the libnotify handle).
note = Notification('Test', 'This is a test.', subtitle='Subtitle')
print note
note.send()
if args.loop:
# if IS_LINUX:
# glib_loop = QLib.MainLoop()
# if args.qt:
# def poll_event_loop():
# # This must be added every time.
# GLib.idle_add(glib_loop.quit)
# glib_loop.run()
# else:
# glib_loop.run()
if args.qt:
# Connect the OS loop to the Qt loop, and start it up.
qt_timer.timeout.connect(poll_event_loop)
qt_app.exec_()
else:
run_event_loop()
# Script entry point.
if __name__ == '__main__':
    main()
# standard library
from importlib import import_module
import os
import shutil
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
# utils
from inflection import camelize
from inflection import pluralize
from inflection import singularize
from inflection import underscore
class AppTemplateCommand(TemplateCommand):
    """Base template command that renders app skeletons, accepting both
    ``.py-tpl`` and ``.pug-tpl`` template sources."""

    rewrite_template_suffixes = (
        # Allow shipping invalid .py files without byte-compilation.
        ('.py-tpl', '.py'),
        ('.pug-tpl', '.pug'),
    )

    def add_arguments(self, parser):
        """Register CLI arguments shared by app-template commands.

        parser: the argparse parser supplied by Django's command machinery.
        """
        parser.add_argument('name', help='Name of the application or project.')
        parser.add_argument(
            'directory',
            nargs='?', help='Optional destination directory'
        )
        parser.add_argument(
            '--model_name',
            # BUG FIX: help text was a copy-paste of --template's help.
            help='Name of the main model of the application.'
        )
        parser.add_argument(
            '--template',
            help='The path or URL to load the template from.'
        )
        parser.add_argument(
            '--extension', '-e', dest='extensions',
            action='append', default=['py', 'pug'],
            help='The file extension(s) to render (default: "py"). '
                 'Separate multiple extensions with commas, or use '
                 '-e multiple times.'
        )
        parser.add_argument(
            '--name', '-n', dest='files',
            action='append', default=[],
            help='The file name(s) to render. Separate multiple extensions '
                 'with commas, or use -n multiple times.'
        )
class Command(AppTemplateCommand):
    help = (
        "Creates a Django app directory structure for the given app name in "
        "the current directory or optionally in the given directory."
    )
    missing_args_message = "You must provide an application name."

    def handle(self, **options):
        """Create the app skeleton, derive every model-name variant the
        templates need, then prefix rendered templates with the model name.

        Raises CommandError when the app name shadows an importable module.
        """
        app_name, target = options.pop('name'), options.pop('directory')

        # Derive naming variants from either the explicit --model_name or
        # the singularized app name.
        if options.get('model_name'):
            model_name = options.pop('model_name')
            snake_case_model_name = underscore(model_name)
        else:
            snake_case_model_name = singularize(app_name)
            model_name = camelize(snake_case_model_name)
        options['model_name'] = model_name
        options['snake_case_model_name'] = snake_case_model_name
        options['lower_case_model_name'] = model_name.lower()
        options['model_verbose_name'] = snake_case_model_name.replace('_', ' ')
        options['plural_model_verbose_name'] = pluralize(
            options['model_verbose_name']
        )
        # BUG FIX: was camelize(pluralize(plural_model_verbose_name)),
        # which camelized a space-separated, already-pluralized phrase and
        # produced e.g. "My models" instead of "MyModels". Camelize must
        # operate on the underscored form.
        options['plural_model_name'] = camelize(pluralize(
            snake_case_model_name
        ))

        self.validate_name(app_name, "app")

        # Check that the app_name cannot be imported.
        try:
            import_module(app_name)
        except ImportError:
            pass
        else:
            raise CommandError(
                "%r conflicts with the name of an existing Python module and "
                "cannot be used as an app name. Please try another name." %
                app_name
            )

        super(Command, self).handle('app', app_name, target, **options)

        # Rename rendered templates to "<model>_<file>" so they match the
        # generated views' template lookups.
        templates_dir = '{}/templates/{}/'.format(app_name, app_name)
        for root, dirs, files in os.walk(templates_dir):
            for pug_file in files:
                # BUG FIX: use os.path.join -- os.walk sub-roots carry no
                # trailing separator, so '{}{}'.format(root, f) built
                # broken paths for nested template directories.
                shutil.move(
                    os.path.join(root, pug_file),
                    os.path.join(root, '{}_{}'.format(
                        snake_case_model_name, pug_file))
                )
| magnet-cl/django-project-template-py3 | base/management/commands/startapp.py | Python | mit | 3,627 |
# -*- coding: utf-8 -*-
"""
ptime.format
~~~~~~~~~~~~
:copyright: (c) 2013 by Marat Ibadinov.
:license: MIT, see LICENSE for more details.
"""
import re
class FormatError(Exception):
    """Raised when a template contains an unknown %-specifier."""
    pass
class Format(object):
    """Compiled representation of a PHP-style date template.

    A template is a string in which ``%X`` specifiers (keys of TEMPLATES)
    stand for date/time components; every other character is copied
    verbatim into the generated regular expression, so raw regex syntax
    such as ``(?: ... )?`` may be embedded directly in templates (the
    stock RFC/ISO formats below rely on this).
    """

    # specifier -> (regex fragment, attribute name the capture represents)
    TEMPLATES = {
        # day #
        'd': (r'\d{2}', 'day'),
        'D': (r'[a-z]{3}', 'weekday'),
        'j': (r'(?:[1-9])|(?:[1-3][0-9])', 'day'),
        'l': (u'[a-zа-я]+', 'weekday'),
        'N': (r'[1-7]', 'weekday'),
        'w': (r'[0-6]', 'weekday'),
        'z': (r'\d{3}', 'yearday'),
        # week #
        # 'W': (r'\d{1,2}', None),
        # month #
        'F': (u'[a-zа-я]+', 'month_name'),
        'm': (r'\d{2}', 'month'),
        'M': (u'[a-zа-я]{3}', 'month_abbr'),
        'n': (r'(?:[1-9])|(?:1[0-2])', 'month'),
        # year #
        # 'o': (r'\d{4}', 'year'),  # should correlate with W
        'Y': (r'\d{4}', 'year'),
        'y': (r'\d{2}', 'year'),
        'C': (r'\d{2}', 'century'),
        # time #
        'a': (r'(?:am)|(?:pm)', 'ampm'),
        'A': (r'(?:am)|(?:pm)', 'ampm'),
        'g': (r'\d{1,2}', 'hour'),
        'G': (r'\d{1,2}', 'hour'),
        'h': (r'\d{2}', 'hour'),
        'H': (r'\d{2}', 'hour'),
        'i': (r'\d{2}', 'minute'),
        's': (r'\d{2}', 'second'),
        'u': (r'\d{6}', 'microsecond'),
        # timezones #
        'e': (r'[a-z\/]+', 'timezone'),
        'O': (r'[+-]\d{4}', 'offset_hours'),
        'P': (r'[+-]\d{2}:\d{2}', 'offset_hours'),
        'R': (r'[+-]\d{2}:?\d{2}', 'offset_hours'),
        'T': (r'[a-z]+', 'timezone'),
        # BUG FIX: was r'(?:+?)\d+' -- '+' is a quantifier and may not
        # open a group, so re.compile raised an error for any template
        # using %Z. An optional sign is what was intended.
        'Z': (r'[+-]?\d+', 'offset_seconds'),
        # relative #
        'L': (u'(?:[a-zа-яіїєґ]+\\s?)+', 'relative_day'),
        'K': (u'\\d+\\s+(?:[a-zа-я]+\\s?)+', 'days_ago')
    }

    def __init__(self, template):
        """Compile *template*; sets ``self.regexp`` (anchored, case- and
        unicode-insensitive pattern) and ``self.attributes`` (attribute
        names corresponding, in order, to the capture groups)."""
        self.template = template
        regexp, attributes = self.parse_template(template)
        self.regexp = re.compile(r'^%s$' % regexp, re.IGNORECASE | re.UNICODE)
        self.attributes = attributes

    def parse_template(self, template):
        """Return ``(regexp_source, attributes)`` for *template*.

        ``%%`` yields a literal percent sign; any other ``%X`` must be a
        key of TEMPLATES, otherwise FormatError is raised. Non-specifier
        characters are copied through unescaped.
        """
        regexp = []
        attributes = []
        had_percent = False
        for character in template:
            if character == '%':
                if had_percent:
                    regexp.append(character)
                had_percent = not had_percent
                continue
            if had_percent:
                if character not in self.TEMPLATES:
                    raise FormatError(
                        "'%{0}' is not a valid template specifier".format(character)
                    )
                pattern, attribute = self.TEMPLATES[character]
                regexp.extend(['(', pattern, ')'])
                attributes.append(attribute)
                had_percent = False
            else:
                regexp.append(character)
        return ''.join(regexp), attributes

    def __eq__(self, other):
        # Equal when built from identical templates; compiled patterns of
        # equal templates compare equal via re's internal compile cache.
        if not isinstance(other, Format):
            return False
        return self.__dict__ == other.__dict__

    @classmethod
    def iso8601(cls):
        # not all variations of ISO-8601 datetime are supported currently
        return cls(r'%Y-%m-%d(?:T|\s)%H:%i(?::%s)?(?:%R)')

    @classmethod
    def rfc822(cls):
        return cls(r'%D, %d %M %Y %H:%i:%s %O')

    @classmethod
    def rfc3339(cls):
        return cls(r'%Y-%m-%dT%H:%i:%s(?:\.%u)?%P')

    @classmethod
    def rfc850(cls):
        return cls(r'%l, %d-%M-%y %H:%i:%s %T')

    @classmethod
    def mysql(cls):
        return cls(r'%Y-%m-%d %H:%i:%s')

    # RFC 822 aliases
    rfc1036 = rfc822
    rfc1123 = rfc822
    rfc2822 = rfc822
    rss = rfc822
    # RFC 850 aliases
    cookie = rfc850
    # RFC 3339 aliases
    w3c = rfc3339
    atom = rfc3339
| Ibadinov/ptime | ptime/format.py | Python | mit | 4,245 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import mock
import unittest
from tempfile import mkdtemp
from shutil import rmtree
from StringIO import StringIO
from time import gmtime
from test.unit import FakeLogger
import itertools
import random
import simplejson
import xml.dom.minidom
from swift import __version__ as swift_version
from swift.common.swob import Request
from swift.common import constraints
from swift.account.server import AccountController
from swift.common.utils import normalize_timestamp, replication, public
from swift.common.request_helpers import get_sys_meta_prefix
from test.unit import patch_policies
from swift.common.storage_policy import StoragePolicy, POLICIES
@patch_policies
class TestAccountController(unittest.TestCase):
"""Test swift.account.server.AccountController"""
def setUp(self):
"""Set up for testing swift.account.server.AccountController"""
self.testdir_base = mkdtemp()
self.testdir = os.path.join(self.testdir_base, 'account_server')
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
def tearDown(self):
"""Tear down for testing swift.account.server.AccountController"""
try:
rmtree(self.testdir_base)
except OSError as err:
if err.errno != errno.ENOENT:
raise
def test_OPTIONS(self):
server_handler = AccountController(
{'devices': self.testdir, 'mount_check': 'false'})
req = Request.blank('/sda1/p/a/c/o', {'REQUEST_METHOD': 'OPTIONS'})
req.content_length = 0
resp = server_handler.OPTIONS(req)
self.assertEquals(200, resp.status_int)
for verb in 'OPTIONS GET POST PUT DELETE HEAD REPLICATE'.split():
self.assertTrue(
verb in resp.headers['Allow'].split(', '))
self.assertEquals(len(resp.headers['Allow'].split(', ')), 7)
self.assertEquals(resp.headers['Server'],
(server_handler.server_type + '/' + swift_version))
def test_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_DELETE_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_not_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
# We now allow deleting non-empty accounts
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_now_empty(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank(
'/sda1/p/a/c1',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '2',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_DELETE_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_DELETE_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank(
'/sda-null/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_HEAD_not_found(self):
# Test the case in which account does not exist (can be recreated)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
# Test the case in which account was deleted but not yet reaped
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_HEAD_empty_account(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '0')
self.assertEqual(resp.headers['x-account-object-count'], '0')
self.assertEqual(resp.headers['x-account-bytes-used'], '0')
def test_HEAD_with_containers(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '2')
self.assertEqual(resp.headers['x-account-object-count'], '0')
self.assertEqual(resp.headers['x-account-bytes-used'], '0')
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '1',
'X-Bytes-Used': '2',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '2',
'X-Delete-Timestamp': '0',
'X-Object-Count': '3',
'X-Bytes-Used': '4',
'X-Timestamp': normalize_timestamp(0)})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '5'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers['x-account-container-count'], '2')
self.assertEqual(resp.headers['x-account-object-count'], '4')
self.assertEqual(resp.headers['x-account-bytes-used'], '6')
def test_HEAD_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_HEAD_invalid_content_type(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'},
headers={'Accept': 'application/plain'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 406)
def test_HEAD_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'HEAD',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_HEAD_invalid_format(self):
format = '%D1%BD%8A9' # invalid UTF-8; should be %E1%BD%8A9 (E -> D)
req = Request.blank('/sda1/p/a?format=' + format,
environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_not_found(self):
req = Request.blank(
'/sda1/p/a/c', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-PUT-Timestamp': normalize_timestamp(1),
'X-DELETE-Timestamp': normalize_timestamp(0),
'X-Object-Count': '1',
'X-Bytes-Used': '1',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
def test_PUT(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_simulated_create_race(self):
state = ['initial']
from swift.account.backend import AccountBroker as OrigAcBr
class InterceptedAcBr(OrigAcBr):
def __init__(self, *args, **kwargs):
super(InterceptedAcBr, self).__init__(*args, **kwargs)
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Save the original db_file attribute value
self._saved_db_file = self.db_file
self.db_file += '.doesnotexist'
def initialize(self, *args, **kwargs):
if state[0] == 'initial':
# Do nothing initially
pass
elif state[0] == 'race':
# Restore the original db_file attribute to get the race
# behavior
self.db_file = self._saved_db_file
return super(InterceptedAcBr, self).initialize(*args, **kwargs)
with mock.patch("swift.account.server.AccountBroker", InterceptedAcBr):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
state[0] = "race"
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
def test_PUT_after_DELETE(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 403)
self.assertEqual(resp.body, 'Recently deleted')
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_PUT_GET_metadata(self):
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test2': 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
self.assertEqual(resp.headers.get('x-account-meta-test2'), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assert_('x-account-meta-test' not in resp.headers)
def test_PUT_GET_sys_metadata(self):
prefix = get_sys_meta_prefix('account')
hdr = '%stest' % prefix
hdr2 = '%stest2' % prefix
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr.title(): 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
# Set another metadata header, ensuring old one doesn't disappear
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr2.title(): 'Value2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
self.assertEqual(resp.headers.get(hdr2), 'Value2')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(3),
hdr.title(): 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(2),
hdr.title(): 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(4),
hdr.title(): ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 202)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assert_(hdr not in resp.headers)
def test_PUT_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_PUT_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_POST_HEAD_metadata(self):
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
'X-Account-Meta-Test': 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
'X-Account-Meta-Test': 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
'X-Account-Meta-Test': 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get('x-account-meta-test'), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
'X-Account-Meta-Test': ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assert_('x-account-meta-test' not in resp.headers)
def test_POST_HEAD_sys_metadata(self):
prefix = get_sys_meta_prefix('account')
hdr = '%stest' % prefix
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Timestamp': normalize_timestamp(1)})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# Set metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(1),
hdr.title(): 'Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'Value')
# Update metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(3),
hdr.title(): 'New Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Send old update to metadata header
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(2),
hdr.title(): 'Old Value'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assertEqual(resp.headers.get(hdr), 'New Value')
# Remove metadata header (by setting it to empty)
req = Request.blank(
'/sda1/p/a', environ={'REQUEST_METHOD': 'POST'},
headers={'X-Timestamp': normalize_timestamp(4),
hdr.title(): ''})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'HEAD'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 204)
self.assert_(hdr not in resp.headers)
def test_POST_invalid_partition(self):
req = Request.blank('/sda1/./a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_timestamp_not_float(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '0'},
headers={'X-Timestamp': 'not-float'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 400)
def test_POST_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
def test_POST_after_DELETE_not_found(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 204)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'POST',
'HTTP_X_TIMESTAMP': '2'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_GET_not_found_plain(self):
# Test the case in which account does not exist (can be recreated)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertTrue('X-Account-Status' not in resp.headers)
# Test the case in which account was deleted but not yet reaped
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0'})
req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'DELETE',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
self.assertEqual(resp.headers['X-Account-Status'], 'Deleted')
def test_GET_not_found_json(self):
req = Request.blank('/sda1/p/a?format=json',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
def test_GET_not_found_xml(self):
req = Request.blank('/sda1/p/a?format=xml',
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 404)
    def test_GET_empty_account_plain(self):
        """GET of an existing but empty account: 204 with text/plain type."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 204)
        self.assertEqual(resp.headers['Content-Type'],
                         'text/plain; charset=utf-8')
    def test_GET_empty_account_json(self):
        """GET ?format=json of an empty account: 200 with JSON content type."""
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['Content-Type'],
                         'application/json; charset=utf-8')
    def test_GET_empty_account_xml(self):
        """GET ?format=xml of an empty account: 200 with XML content type."""
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'PUT',
                                     'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.headers['Content-Type'],
                         'application/xml; charset=utf-8')
def test_GET_over_limit(self):
req = Request.blank(
'/sda1/p/a?limit=%d' % (constraints.ACCOUNT_LISTING_LIMIT + 1),
environ={'REQUEST_METHOD': 'GET'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
    def test_GET_with_containers_plain(self):
        """Plain-text listing: one container name per line, text/plain."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        # Re-PUT the containers with nonzero stats; the plain listing body
        # (names only) must be unchanged.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(resp.charset, 'utf-8')
        # test unknown format uses default plain
        req = Request.blank('/sda1/p/a?format=somethinglese',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c1', 'c2'])
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_with_containers_json(self):
        """JSON listing: list of {name, count, bytes} dicts per container."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(simplejson.loads(resp.body),
                         [{'count': 0, 'bytes': 0, 'name': 'c1'},
                          {'count': 0, 'bytes': 0, 'name': 'c2'}])
        # Re-PUT with nonzero stats; JSON listing must reflect new counts.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(simplejson.loads(resp.body),
                         [{'count': 1, 'bytes': 2, 'name': 'c1'},
                          {'count': 3, 'bytes': 4, 'name': 'c2'}])
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_with_containers_xml(self):
        """XML listing: <account> root with a <container> element per
        container, each carrying name/count/bytes child nodes."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/xml')
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        # '#text' nodes are whitespace between elements; skip them.
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 2)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = \
            [n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '0')
        # Re-PUT with nonzero stats; XML listing must reflect new counts.
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '1',
                                     'X-Bytes-Used': '2',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c2', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '2',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '3',
                                     'X-Bytes-Used': '4',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 2)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c1')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '1')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = [
            n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '4')
        self.assertEqual(resp.charset, 'utf-8')
    def test_GET_xml_escapes_account_name(self):
        """Quote characters in the account name survive XML round-trip."""
        req = Request.blank(
            '/sda1/p/%22%27',  # "'
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/%22%27?format=xml',
            environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        # Parsing back proves the name attribute was properly XML-escaped.
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.attributes['name'].value, '"\'')
    def test_GET_xml_escapes_container_name(self):
        """XML-special characters in a container name survive the listing."""
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/%22%3Cword',  # "<word
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                     'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
                     'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a?format=xml',
            environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        dom = xml.dom.minidom.parseString(resp.body)
        # firstChild.nextSibling skips the '#text' whitespace node to reach
        # the first <container> element; its <name> text must round-trip.
        self.assertEqual(
            dom.firstChild.firstChild.nextSibling.firstChild.firstChild.data,
            '"<word')
    def test_GET_xml_escapes_container_name_as_subdir(self):
        """XML-special characters survive when the name collapses to a
        delimiter <subdir> entry."""
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a/%22%3Cword-test',  # "<word-test
            environ={'REQUEST_METHOD': 'PUT', 'HTTP_X_TIMESTAMP': '1',
                     'HTTP_X_PUT_TIMESTAMP': '1', 'HTTP_X_OBJECT_COUNT': '0',
                     'HTTP_X_DELETE_TIMESTAMP': '0', 'HTTP_X_BYTES_USED': '1'})
        req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a?format=xml&delimiter=-',
            environ={'REQUEST_METHOD': 'GET', 'HTTP_X_TIMESTAMP': '1'})
        resp = req.get_response(self.controller)
        dom = xml.dom.minidom.parseString(resp.body)
        # The subdir node's name attribute keeps the text up to and
        # including the delimiter.
        self.assertEqual(
            dom.firstChild.firstChild.nextSibling.attributes['name'].value,
            '"<word-')
    def test_GET_limit_marker_plain(self):
        """limit caps the plain listing; marker resumes after a name."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        # Create containers c0..c4.
        for c in range(5):
            req = Request.blank(
                '/sda1/p/a/c%d' % c,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': str(c + 1),
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '2',
                         'X-Bytes-Used': '3',
                         'X-Timestamp': normalize_timestamp(0)})
            req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?limit=3',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c0', 'c1', 'c2'])
        req = Request.blank('/sda1/p/a?limit=3&marker=c2',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['c3', 'c4'])
    def test_GET_limit_marker_json(self):
        """limit/marker paging works for the JSON listing format."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        # Create containers c0..c4 with identical stats.
        for c in range(5):
            req = Request.blank(
                '/sda1/p/a/c%d' % c,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': str(c + 1),
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '2',
                         'X-Bytes-Used': '3',
                         'X-Timestamp': normalize_timestamp(0)})
            req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?limit=3&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(simplejson.loads(resp.body),
                         [{'count': 2, 'bytes': 3, 'name': 'c0'},
                          {'count': 2, 'bytes': 3, 'name': 'c1'},
                          {'count': 2, 'bytes': 3, 'name': 'c2'}])
        req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(simplejson.loads(resp.body),
                         [{'count': 2, 'bytes': 3, 'name': 'c3'},
                          {'count': 2, 'bytes': 3, 'name': 'c4'}])
    def test_GET_limit_marker_xml(self):
        """limit/marker paging works for the XML listing format."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        # Create containers c0..c4 with identical stats.
        for c in range(5):
            req = Request.blank(
                '/sda1/p/a/c%d' % c,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': str(c + 1),
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '2',
                         'X-Bytes-Used': '3',
                         'X-Timestamp': normalize_timestamp(c)})
            req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?limit=3&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        # '#text' nodes are whitespace between elements; skip them.
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 3)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c0')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = [
            n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c2')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
        # Second page: marker=c2 resumes with c3, c4.
        req = Request.blank('/sda1/p/a?limit=3&marker=c2&format=xml',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 2)
        self.assertEqual(listing[0].nodeName, 'container')
        container = [n for n in listing[0].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c3')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
        self.assertEqual(listing[-1].nodeName, 'container')
        container = [
            n for n in listing[-1].childNodes if n.nodeName != '#text']
        self.assertEqual(sorted([n.nodeName for n in container]),
                         ['bytes', 'count', 'name'])
        node = [n for n in container if n.nodeName == 'name'][0]
        self.assertEqual(node.firstChild.nodeValue, 'c4')
        node = [n for n in container if n.nodeName == 'count'][0]
        self.assertEqual(node.firstChild.nodeValue, '2')
        node = [n for n in container if n.nodeName == 'bytes'][0]
        self.assertEqual(node.firstChild.nodeValue, '3')
    def test_GET_accept_wildcard(self):
        """Accept: */* selects the default plain-text listing format."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = '*/*'
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, 'c1\n')
def test_GET_accept_application_wildcard(self):
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Put-Timestamp': '1',
'X-Delete-Timestamp': '0',
'X-Object-Count': '0',
'X-Bytes-Used': '0',
'X-Timestamp': normalize_timestamp(0)})
resp = req.get_response(self.controller)
self.assertEquals(resp.status_int, 201)
req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
req.accept = 'application/*'
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 200)
self.assertEqual(len(simplejson.loads(resp.body)), 1)
    def test_GET_accept_json(self):
        """Accept: application/json selects the JSON listing format."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(simplejson.loads(resp.body)), 1)
    def test_GET_accept_xml(self):
        """Accept: application/xml selects the XML listing format."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/xml'
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        self.assertEqual(dom.firstChild.nodeName, 'account')
        # '#text' nodes are whitespace between elements; skip them.
        listing = \
            [n for n in dom.firstChild.childNodes if n.nodeName != '#text']
        self.assertEqual(len(listing), 1)
    def test_GET_accept_conflicting(self):
        """An explicit ?format= query parameter wins over the Accept header."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a?format=plain',
                            environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/json'
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body, 'c1\n')
    def test_GET_accept_not_valid(self):
        """A malformed Accept header ('application/xml*') must 406."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a/c1', environ={'REQUEST_METHOD': 'PUT'},
                            headers={'X-Put-Timestamp': '1',
                                     'X-Delete-Timestamp': '0',
                                     'X-Object-Count': '0',
                                     'X-Bytes-Used': '0',
                                     'X-Timestamp': normalize_timestamp(0)})
        req.get_response(self.controller)
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        req.accept = 'application/xml*'
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 406)
def test_GET_delimiter_too_long(self):
req = Request.blank('/sda1/p/a?delimiter=xx',
environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '0'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 412)
    def test_GET_prefix_delimiter_plain(self):
        """prefix/delimiter rollups in the plain listing.

        Creates containers sub.N and sub.N.M; a delimited listing collapses
        deeper names into 'sub.' style rollup entries.
        """
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for first in range(3):
            req = Request.blank(
                '/sda1/p/a/sub.%s' % first,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': '1',
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '0',
                         'X-Bytes-Used': '0',
                         'X-Timestamp': normalize_timestamp(0)})
            req.get_response(self.controller)
            for second in range(3):
                req = Request.blank(
                    '/sda1/p/a/sub.%s.%s' % (first, second),
                    environ={'REQUEST_METHOD': 'PUT'},
                    headers={'X-Put-Timestamp': '1',
                             'X-Delete-Timestamp': '0',
                             'X-Object-Count': '0',
                             'X-Bytes-Used': '0',
                             'X-Timestamp': normalize_timestamp(0)})
                req.get_response(self.controller)
        # No prefix: everything collapses into one rollup entry.
        req = Request.blank('/sda1/p/a?delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'), ['sub.'])
        # prefix=sub.: real containers plus per-branch rollups.
        req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(
            resp.body.strip().split('\n'),
            ['sub.0', 'sub.0.', 'sub.1', 'sub.1.', 'sub.2', 'sub.2.'])
        # prefix=sub.1.: only the leaves under that branch.
        req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.body.strip().split('\n'),
                         ['sub.1.0', 'sub.1.1', 'sub.1.2'])
    def test_GET_prefix_delimiter_json(self):
        """prefix/delimiter rollups in JSON: rollups appear as 'subdir'
        entries, real containers as 'name' entries."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for first in range(3):
            req = Request.blank(
                '/sda1/p/a/sub.%s' % first,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': '1',
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '0',
                         'X-Bytes-Used': '0',
                         'X-Timestamp': normalize_timestamp(0)})
            req.get_response(self.controller)
            for second in range(3):
                req = Request.blank(
                    '/sda1/p/a/sub.%s.%s' % (first, second),
                    environ={'REQUEST_METHOD': 'PUT'},
                    headers={'X-Put-Timestamp': '1',
                             'X-Delete-Timestamp': '0',
                             'X-Object-Count': '0',
                             'X-Bytes-Used': '0',
                             'X-Timestamp': normalize_timestamp(0)})
                req.get_response(self.controller)
        # The 's:' prefix below marks entries that came back as subdirs.
        req = Request.blank('/sda1/p/a?delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual([n.get('name', 's:' + n.get('subdir', 'error'))
                          for n in simplejson.loads(resp.body)], ['s:sub.'])
        req = Request.blank('/sda1/p/a?prefix=sub.&delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(
            [n.get('name', 's:' + n.get('subdir', 'error'))
             for n in simplejson.loads(resp.body)],
            ['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
        req = Request.blank('/sda1/p/a?prefix=sub.1.&delimiter=.&format=json',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(
            [n.get('name', 's:' + n.get('subdir', 'error'))
             for n in simplejson.loads(resp.body)],
            ['sub.1.0', 'sub.1.1', 'sub.1.2'])
    def test_GET_prefix_delimiter_xml(self):
        """prefix/delimiter rollups in XML: rollups appear as <subdir>
        elements, real containers as <container> elements."""
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'PUT',
                                                  'HTTP_X_TIMESTAMP': '0'})
        resp = req.get_response(self.controller)
        for first in range(3):
            req = Request.blank(
                '/sda1/p/a/sub.%s' % first,
                environ={'REQUEST_METHOD': 'PUT'},
                headers={'X-Put-Timestamp': '1',
                         'X-Delete-Timestamp': '0',
                         'X-Object-Count': '0',
                         'X-Bytes-Used': '0',
                         'X-Timestamp': normalize_timestamp(0)})
            req.get_response(self.controller)
            for second in range(3):
                req = Request.blank(
                    '/sda1/p/a/sub.%s.%s' % (first, second),
                    environ={'REQUEST_METHOD': 'PUT'},
                    headers={'X-Put-Timestamp': '1',
                             'X-Delete-Timestamp': '0',
                             'X-Object-Count': '0',
                             'X-Bytes-Used': '0',
                             'X-Timestamp': normalize_timestamp(0)})
                req.get_response(self.controller)
        req = Request.blank(
            '/sda1/p/a?delimiter=.&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        # Flatten the DOM: subdirs become 's:<name>', containers their name.
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEqual(listing, ['s:sub.'])
        req = Request.blank(
            '/sda1/p/a?prefix=sub.&delimiter=.&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEqual(
            listing,
            ['sub.0', 's:sub.0.', 'sub.1', 's:sub.1.', 'sub.2', 's:sub.2.'])
        req = Request.blank(
            '/sda1/p/a?prefix=sub.1.&delimiter=.&format=xml',
            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 200)
        dom = xml.dom.minidom.parseString(resp.body)
        listing = []
        for node1 in dom.firstChild.childNodes:
            if node1.nodeName == 'subdir':
                listing.append('s:' + node1.attributes['name'].value)
            elif node1.nodeName == 'container':
                for node2 in node1.childNodes:
                    if node2.nodeName == 'name':
                        listing.append(node2.firstChild.nodeValue)
        self.assertEqual(listing, ['sub.1.0', 'sub.1.1', 'sub.1.2'])
def test_GET_insufficient_storage(self):
self.controller = AccountController({'devices': self.testdir})
req = Request.blank('/sda-null/p/a', environ={'REQUEST_METHOD': 'GET',
'HTTP_X_TIMESTAMP': '1'})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 507)
    def test_through_call(self):
        """Drive the controller through the raw WSGI interface; a GET of a
        nonexistent account must produce a 404 status line."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        # Capture the status/header strings the app hands to start_response.
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/sda1/p/a',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEqual(errbuf.getvalue(), '')
        self.assertEqual(outbuf.getvalue()[:4], '404 ')
    def test_through_call_invalid_path(self):
        """Via raw WSGI: a path that is too short ('/bob') must yield 400."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        # Capture the status/header strings the app hands to start_response.
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '/bob',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEqual(errbuf.getvalue(), '')
        self.assertEqual(outbuf.getvalue()[:4], '400 ')
    def test_through_call_invalid_path_utf8(self):
        """Via raw WSGI: an invalid (NUL) path must yield 412."""
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        # Capture the status/header strings the app hands to start_response.
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'GET',
                                  'SCRIPT_NAME': '',
                                  'PATH_INFO': '\x00',
                                  'SERVER_NAME': '127.0.0.1',
                                  'SERVER_PORT': '8080',
                                  'SERVER_PROTOCOL': 'HTTP/1.0',
                                  'CONTENT_LENGTH': '0',
                                  'wsgi.version': (1, 0),
                                  'wsgi.url_scheme': 'http',
                                  'wsgi.input': inbuf,
                                  'wsgi.errors': errbuf,
                                  'wsgi.multithread': False,
                                  'wsgi.multiprocess': False,
                                  'wsgi.run_once': False},
                                 start_response)
        self.assertEqual(errbuf.getvalue(), '')
        self.assertEqual(outbuf.getvalue()[:4], '412 ')
    def test_invalid_method_doesnt_exist(self):
        """An unknown request method must yield 405 Method Not Allowed."""
        errbuf = StringIO()
        outbuf = StringIO()
        # Capture the status/header strings the app hands to start_response.
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': 'method_doesnt_exist',
                                  'PATH_INFO': '/sda1/p/a'},
                                 start_response)
        self.assertEqual(errbuf.getvalue(), '')
        self.assertEqual(outbuf.getvalue()[:4], '405 ')
    def test_invalid_method_is_not_public(self):
        """A method name that exists on the controller but is not a public
        WSGI verb ('__init__') must also yield 405."""
        errbuf = StringIO()
        outbuf = StringIO()
        # Capture the status/header strings the app hands to start_response.
        def start_response(*args):
            outbuf.writelines(args)
        self.controller.__call__({'REQUEST_METHOD': '__init__',
                                  'PATH_INFO': '/sda1/p/a'},
                                 start_response)
        self.assertEqual(errbuf.getvalue(), '')
        self.assertEqual(outbuf.getvalue()[:4], '405 ')
    def test_params_format(self):
        """Both supported ?format= values (xml, json) return 200 listings."""
        Request.blank('/sda1/p/a',
                      headers={'X-Timestamp': normalize_timestamp(1)},
                      environ={'REQUEST_METHOD': 'PUT'}).get_response(
                          self.controller)
        # NOTE: loop variable shadows the builtin 'format'.
        for format in ('xml', 'json'):
            req = Request.blank('/sda1/p/a?format=%s' % format,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 200)
    def test_params_utf8(self):
        """Malformed UTF-8 in query params -> 400; valid but over-long
        delimiter -> 412; valid UTF-8 elsewhere is accepted/ignored."""
        # Bad UTF8 sequence, all parameters should cause 400 error
        for param in ('delimiter', 'limit', 'marker', 'prefix', 'end_marker',
                      'format'):
            req = Request.blank('/sda1/p/a?%s=\xce' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 400,
                             "%d on param %s" % (resp.status_int, param))
        # Good UTF8 sequence for delimiter, too long (1 byte delimiters only)
        req = Request.blank('/sda1/p/a?delimiter=\xce\xa9',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 412,
                         "%d on param delimiter" % (resp.status_int))
        Request.blank('/sda1/p/a',
                      headers={'X-Timestamp': normalize_timestamp(1)},
                      environ={'REQUEST_METHOD': 'PUT'}).get_response(
            self.controller)
        # Good UTF8 sequence, ignored for limit, doesn't affect other queries
        for param in ('limit', 'marker', 'prefix', 'end_marker', 'format'):
            req = Request.blank('/sda1/p/a?%s=\xce\xa9' % param,
                                environ={'REQUEST_METHOD': 'GET'})
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 204,
                             "%d on param %s" % (resp.status_int, param))
    def test_PUT_auto_create(self):
        # Container PUTs must not auto-create a normal account (404), but
        # accounts whose name starts with '.' are auto-created (201).
        headers = {'x-put-timestamp': normalize_timestamp(1),
                   'x-delete-timestamp': normalize_timestamp(0),
                   'x-object-count': '0',
                   'x-bytes-used': '0'}
        req = Request.blank('/sda1/p/a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        # reserved '.a' account is auto-created on container PUT
        req = Request.blank('/sda1/p/.a/c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)
        # a dotted *container* name does not trigger account auto-creation
        req = Request.blank('/sda1/p/a/.c',
                            environ={'REQUEST_METHOD': 'PUT'},
                            headers=dict(headers))
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
    def test_content_type_on_HEAD(self):
        # HEAD must negotiate the response content type from either the
        # ?format= query parameter or the Accept header, defaulting to
        # text/plain, and always advertise the utf-8 charset.
        Request.blank('/sda1/p/a',
                      headers={'X-Timestamp': normalize_timestamp(1)},
                      environ={'REQUEST_METHOD': 'PUT'}).get_response(
            self.controller)
        env = {'REQUEST_METHOD': 'HEAD'}
        req = Request.blank('/sda1/p/a?format=xml', environ=env)
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/xml')
        req = Request.blank('/sda1/p/a?format=json', environ=env)
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.charset, 'utf-8')
        req = Request.blank('/sda1/p/a', environ=env)
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'text/plain')
        self.assertEqual(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a', headers={'Accept': 'application/json'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/json')
        self.assertEqual(resp.charset, 'utf-8')
        req = Request.blank(
            '/sda1/p/a', headers={'Accept': 'application/xml'}, environ=env)
        resp = req.get_response(self.controller)
        self.assertEqual(resp.content_type, 'application/xml')
        self.assertEqual(resp.charset, 'utf-8')
    def test_serv_reserv(self):
        # Test replication_server flag was set from configuration file.
        # Unset -> None; truthy strings -> True; everything else -> False.
        conf = {'devices': self.testdir, 'mount_check': 'false'}
        self.assertEqual(AccountController(conf).replication_server, None)
        for val in [True, '1', 'True', 'true']:
            conf['replication_server'] = val
            self.assertTrue(AccountController(conf).replication_server)
        for val in [False, 0, '0', 'False', 'false', 'test_string']:
            conf['replication_server'] = val
            self.assertFalse(AccountController(conf).replication_server)
    def test_list_allowed_methods(self):
        # Test list of allowed_methods
        # Client-facing verbs carry no 'replication' marker, while
        # REPLICATE must be flagged replication=True by its decorator.
        obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST']
        repl_methods = ['REPLICATE']
        for method_name in obj_methods:
            method = getattr(self.controller, method_name)
            self.assertFalse(hasattr(method, 'replication'))
        for method_name in repl_methods:
            method = getattr(self.controller, method_name)
            self.assertEqual(method.replication, True)
    def test_correct_allowed_method(self):
        # Test correct work for allowed method using
        # swift.account.server.AccountController.__call__
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = AccountController(
            {'devices': self.testdir,
             'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        method_res = mock.MagicMock()
        # a public, non-replication method must be dispatched normally
        mock_method = public(lambda x: mock.MagicMock(return_value=method_res))
        with mock.patch.object(self.controller, method,
                               new=mock_method):
            mock_method.replication = False
            response = self.controller(env, start_response)
            self.assertEqual(response, method_res)
    def test_not_allowed_method(self):
        # Test correct work for NOT allowed method using
        # swift.account.server.AccountController.__call__
        inbuf = StringIO()
        errbuf = StringIO()
        outbuf = StringIO()
        self.controller = AccountController(
            {'devices': self.testdir, 'mount_check': 'false',
             'replication_server': 'false'})
        def start_response(*args):
            """Sends args to outbuf"""
            outbuf.writelines(args)
        method = 'PUT'
        env = {'REQUEST_METHOD': method,
               'SCRIPT_NAME': '',
               'PATH_INFO': '/sda1/p/a/c',
               'SERVER_NAME': '127.0.0.1',
               'SERVER_PORT': '8080',
               'SERVER_PROTOCOL': 'HTTP/1.0',
               'CONTENT_LENGTH': '0',
               'wsgi.version': (1, 0),
               'wsgi.url_scheme': 'http',
               'wsgi.input': inbuf,
               'wsgi.errors': errbuf,
               'wsgi.multithread': False,
               'wsgi.multiprocess': False,
               'wsgi.run_once': False}
        answer = ['<html><h1>Method Not Allowed</h1><p>The method is not '
                  'allowed for this resource.</p></html>']
        # a replication-only method on a non-replication server is refused
        mock_method = replication(public(lambda x: mock.MagicMock()))
        with mock.patch.object(self.controller, method,
                               new=mock_method):
            mock_method.replication = True
            response = self.controller.__call__(env, start_response)
            self.assertEqual(response, answer)
def test_call_incorrect_replication_method(self):
inbuf = StringIO()
errbuf = StringIO()
outbuf = StringIO()
self.controller = AccountController(
{'devices': self.testdir, 'mount_check': 'false',
'replication_server': 'true'})
def start_response(*args):
"""Sends args to outbuf"""
outbuf.writelines(args)
obj_methods = ['DELETE', 'PUT', 'HEAD', 'GET', 'POST', 'OPTIONS']
for method in obj_methods:
env = {'REQUEST_METHOD': method,
'SCRIPT_NAME': '',
'PATH_INFO': '/sda1/p/a/c',
'SERVER_NAME': '127.0.0.1',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.0',
'CONTENT_LENGTH': '0',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': inbuf,
'wsgi.errors': errbuf,
'wsgi.multithread': False,
'wsgi.multiprocess': False,
'wsgi.run_once': False}
self.controller(env, start_response)
self.assertEquals(errbuf.getvalue(), '')
self.assertEquals(outbuf.getvalue()[:4], '405 ')
    def test_GET_log_requests_true(self):
        # With log_requests enabled every request must produce an info
        # log line, even a 404.
        self.controller.logger = FakeLogger()
        self.controller.log_requests = True
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertTrue(self.controller.logger.log_dict['info'])
    def test_GET_log_requests_false(self):
        # With log_requests disabled nothing may be logged at info level.
        self.controller.logger = FakeLogger()
        self.controller.log_requests = False
        req = Request.blank('/sda1/p/a', environ={'REQUEST_METHOD': 'GET'})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 404)
        self.assertFalse(self.controller.logger.log_dict['info'])
    def test_log_line_format(self):
        # Pin the exact access-log line layout; time.time/gmtime/getpid
        # are mocked so the rendered line is fully deterministic.
        req = Request.blank(
            '/sda1/p/a',
            environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
        self.controller.logger = FakeLogger()
        with mock.patch(
                'time.gmtime', mock.MagicMock(side_effect=[gmtime(10001.0)])):
            with mock.patch(
                    'time.time',
                    mock.MagicMock(side_effect=[10000.0, 10001.0, 10002.0])):
                with mock.patch(
                        'os.getpid', mock.MagicMock(return_value=1234)):
                    req.get_response(self.controller)
        self.assertEqual(
            self.controller.logger.log_dict['info'],
            [(('1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD /sda1/p/a" 404 '
               '- "-" "-" "-" 2.0000 "-" 1234 -',), {})])
def test_policy_stats_with_legacy(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# add a container
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# read back rollup
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
self.assertEquals(resp.headers['X-Account-Object-Count'], '2')
self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
POLICIES[0].name], '2')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
POLICIES[0].name], '4')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
POLICIES[0].name], '1')
def test_policy_stats_non_default(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# add a container
non_default_policies = [p for p in POLICIES if not p.is_default]
policy = random.choice(non_default_policies)
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
'X-Backend-Storage-Policy-Index': policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# read back rollup
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
self.assertEquals(resp.headers['X-Account-Object-Count'], '2')
self.assertEquals(resp.headers['X-Account-Bytes-Used'], '4')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Object-Count' %
policy.name], '2')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Bytes-Used' %
policy.name], '4')
self.assertEquals(
resp.headers['X-Account-Storage-Policy-%s-Container-Count' %
policy.name], '1')
def test_empty_policy_stats(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
self.assert_('storage-policy' not in key.lower())
def test_empty_except_for_used_policies(self):
ts = itertools.count()
# create the account
req = Request.blank('/sda1/p/a', method='PUT', headers={
'X-Timestamp': normalize_timestamp(next(ts))})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201) # sanity
# starts empty
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
self.assert_('storage-policy' not in key.lower())
# add a container
policy = random.choice(POLICIES)
req = Request.blank('/sda1/p/a/c1', method='PUT', headers={
'X-Put-Timestamp': normalize_timestamp(next(ts)),
'X-Delete-Timestamp': '0',
'X-Object-Count': '2',
'X-Bytes-Used': '4',
'X-Backend-Storage-Policy-Index': policy.idx,
})
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int, 201)
# only policy of the created container should be in headers
for method in ('GET', 'HEAD'):
req = Request.blank('/sda1/p/a', method=method)
resp = req.get_response(self.controller)
self.assertEqual(resp.status_int // 100, 2)
for key in resp.headers:
if 'storage-policy' in key.lower():
self.assert_(policy.name.lower() in key.lower())
    def test_multiple_policies_in_use(self):
        # Rollup must keep separate, correct per-policy counters when
        # containers exist under every configured policy, and the
        # per-policy figures must sum to the account totals.
        ts = itertools.count()
        # create the account
        req = Request.blank('/sda1/p/a', method='PUT', headers={
            'X-Timestamp': normalize_timestamp(next(ts))})
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int, 201)  # sanity
        # add some containers
        for policy in POLICIES:
            count = policy.idx * 100  # good as any integer
            container_path = '/sda1/p/a/c_%s' % policy.name
            req = Request.blank(
                container_path, method='PUT', headers={
                    'X-Put-Timestamp': normalize_timestamp(next(ts)),
                    'X-Delete-Timestamp': '0',
                    'X-Object-Count': count,
                    'X-Bytes-Used': count,
                    'X-Backend-Storage-Policy-Index': policy.idx,
                })
            resp = req.get_response(self.controller)
            self.assertEqual(resp.status_int, 201)
        req = Request.blank('/sda1/p/a', method='HEAD')
        resp = req.get_response(self.controller)
        self.assertEqual(resp.status_int // 100, 2)
        # check container counts in roll up headers
        total_object_count = 0
        total_bytes_used = 0
        for key in resp.headers:
            if 'storage-policy' not in key.lower():
                continue
            for policy in POLICIES:
                if policy.name.lower() not in key.lower():
                    continue
                if key.lower().endswith('object-count'):
                    # each policy contributed exactly idx * 100 objects
                    object_count = int(resp.headers[key])
                    self.assertEqual(policy.idx * 100, object_count)
                    total_object_count += object_count
                if key.lower().endswith('bytes-used'):
                    bytes_used = int(resp.headers[key])
                    self.assertEqual(policy.idx * 100, bytes_used)
                    total_bytes_used += bytes_used
        expected_total_count = sum([p.idx * 100 for p in POLICIES])
        self.assertEqual(expected_total_count, total_object_count)
        self.assertEqual(expected_total_count, total_bytes_used)
@patch_policies([StoragePolicy(0, 'zero', False),
                 StoragePolicy(1, 'one', True),
                 StoragePolicy(2, 'two', False),
                 StoragePolicy(3, 'three', False)])
class TestNonLegacyDefaultStoragePolicy(TestAccountController):
    """Re-run the whole TestAccountController suite with a patched
    policy set whose default (policy 1, 'one') is not the legacy
    policy 0."""
    pass
if __name__ == '__main__':
unittest.main()
| zackmdavis/swift | test/unit/account/test_server.py | Python | apache-2.0 | 94,681 |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from modeltranslation.translator import translator, TranslationOptions
from .models import Document
class DocumentTranslationOptions(TranslationOptions):
    # Model fields for which django-modeltranslation should create
    # per-language database columns.
    fields = ('title', 'text')
# Register Document so its title/text fields become translatable.
translator.register(Document, DocumentTranslationOptions)
| foobacca/django-multilingual-search | tests/testproject/translation.py | Python | mit | 321 |
import sys
import numpy
from collections import defaultdict
# Per-contig accumulators: gene sizes, intron lengths, prediction
# confidences and same-strand indicators, keyed by contig name.
# NOTE: written for Python 2 (print statements); not Python 3 compatible.
contigs = []
genes = {}
introns = {}
confs = {}
strands = {}
# sys.argv[1]: GFF-like gene prediction file; lines starting with '#'
# reset the running state between records.
with open(sys.argv[1], "r") as f:
    previous_end = 0
    previous_strand = -1
    previous_contig = ""
    for line in f:
        if line.startswith("#"):
            previous_end = 0
            previous_strand = 0
        if not line.startswith("#"):
            #get gene size
            contig = line.split()[0]
            s = int(line.split()[3])
            e = int(line.split()[4])
            gene_size = int(e) - int(s)
            #get reading strand (-) or (+)
            stra = line.split()[6]
            if stra == "+":
                strand = 1.0
            else:
                strand = 0.0
            if contig != previous_contig:
                # first gene on a new contig: reset state, record no intron
                previous_contig = contig
                previous_end = e
                previous_strand = strand
                contigs.append(contig)
                genes[contig] = []
                introns[contig] = []
                confs[contig] = []
                strands[contig] = []
                continue
            #get intron length
            intron_length = s - previous_end
            # 1 when this gene is on the same strand as the previous one
            if strand == previous_strand:
                strandedness = 1
            else:
                strandedness = 0
            previous_end = e
            previous_strand = strand
            #get conf
            conf = float(line.split("conf=")[1].split(";")[0])
            genes[contig].append(gene_size)
            introns[contig].append(intron_length)
            confs[contig].append(conf)
            strands[contig].append(strandedness)
            #print str(gene_size) + "," + str(intron_length) + "," + str(conf) + "," + str(strandedness)
#print introns
# Emit CSV rows (contig, mean gene size, mean intron length, mean
# confidence, mean strandedness) for contigs with > 20 predicted genes.
for contig in contigs:
    if len(genes[contig]) > 20:
        print contig +"," + str(float(sum(genes[contig])) / len(genes[contig])) + "," + str(float(sum(introns[contig])) / len(introns[contig])) + "," + str(float(sum(confs[contig])) / len(confs[contig])) + "," + str(float(sum(strands[contig])) / len(strands[contig]))
#print confs
| alexcritschristoph/CircHMP | classifier/calculate_training_metrics.py | Python | gpl-2.0 | 1,691 |
"""Module containing index utilities"""
import struct
import tempfile
import os
from functools import wraps
from git.compat import is_win
__all__ = ('TemporaryFileSwap', 'post_clear_cache', 'default_index', 'git_working_dir')
#{ Aliases
pack = struct.pack
unpack = struct.unpack
#} END aliases
class TemporaryFileSwap(object):
    """Utility class moving a file to a temporary location within the same directory
    and moving it back on to where on object deletion."""
    __slots__ = ("file_path", "tmp_file_path")

    def __init__(self, file_path):
        # tmp_file_path gets a unique mktemp-generated suffix appended so
        # concurrent swaps of the same path cannot collide.
        self.file_path = file_path
        self.tmp_file_path = self.file_path + tempfile.mktemp('', '', '')
        # it may be that the source does not exist
        try:
            os.rename(self.file_path, self.tmp_file_path)
        except OSError:
            pass

    def __del__(self):
        # Restore the original file when the swap object is collected.
        if os.path.isfile(self.tmp_file_path):
            # On Windows os.rename cannot overwrite an existing target, so
            # remove any file that reappeared at the original path first.
            if is_win and os.path.exists(self.file_path):
                os.remove(self.file_path)
            os.rename(self.tmp_file_path, self.file_path)
        # END temp file exists
#{ Decorators
def post_clear_cache(func):
    """Decorator for functions that alter the index using the git command.

    A successful call invalidates our possibly existing entries dictionary,
    which therefore has to be dropped so it is lazily re-read later.

    :note:
        This decorator will not be required once all functions are
        implemented natively, which in fact is possible, but probably not
        feasible performance wise.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # Only reached when func did not raise: the cache stays valid on
        # error, hence the "if not raised" semantics of this decorator.
        self._delete_entries_cache()
        return result
    # END wrapper method
    return wrapper
def default_index(func):
    """Decorator assuring the wrapped method may only run if we are the
    default repository index. This is as we rely on git commands that
    operate on that index only."""
    @wraps(func)
    def guarded(self, *args, **kwargs):
        # Only the default index file may be operated on; anything else
        # would make the underlying git commands act on the wrong index.
        if self._file_path == self._index_path():
            return func(self, *args, **kwargs)
        raise AssertionError(
            "Cannot call %r on indices that do not represent the default git index" % func.__name__)
    # END wrapper method
    return guarded
def git_working_dir(func):
    """Decorator which changes the current working dir to the one of the git
    repository in order to assure relative paths are handled correctly"""
    @wraps(func)
    def in_repo_dir(self, *args, **kwargs):
        original_dir = os.getcwd()
        os.chdir(self.repo.working_tree_dir)
        try:
            return func(self, *args, **kwargs)
        finally:
            # always restore the caller's working directory, even on error
            os.chdir(original_dir)
    # END wrapper
    return in_repo_dir
#} END decorators
| expobrain/GitPython | git/index/util.py | Python | bsd-3-clause | 2,887 |
#!/usr/bin/env python
"""Test for the flow state class."""
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.rdfvalues import flows
from grr.lib.rdfvalues import test_base
class FlowStateTest(test_base.RDFValueTestCase):
  """Tests for rdfvalue.FlowState serialization and comparisons."""

  rdfvalue_class = rdfvalue.FlowState

  def GenerateSample(self, number=0):
    # Sample with a single registered attribute, used by the generic
    # RDFValueTestCase machinery and the comparison tests below.
    res = rdfvalue.FlowState()
    res.Register("number", number)
    return res

  def testComparisons(self):
    """Checks that object comparisons work."""
    sample1 = self.GenerateSample(1)
    self.assertTrue(sample1 == self.GenerateSample(1))
    self.assertFalse(sample1 == self.GenerateSample(2))
    self.assertTrue(sample1 != self.GenerateSample(2))

  def testFlowState(self):
    # Round-trip through SerializeToString/ParseFromString must preserve
    # registered attributes and the nested context object.
    state = rdfvalue.FlowState()
    state.Register("teststate", 1)
    state.teststate = 100
    state.Register("context", flows.DataObject())
    state.context.testcontext = 50
    s = state.SerializeToString()
    new_state = rdfvalue.FlowState()
    new_state.ParseFromString(s)
    self.assertEqual(new_state.teststate, 100)
    self.assertEqual(new_state.context.testcontext, 50)
    # context and teststate
    self.assertEqual(len(new_state), 2)
    self.assertEqual(len(new_state.context), 1)

  def testBadPickle(self):
    """Test that we can recover some of the bad pickle."""
    state = rdfvalue.FlowState()
    # Store an instance of a RDFURN here.
    state.Register("urn", rdfvalue.RDFURN("aff4:/"))
    serialized = state.SerializeToString()
    # Substitute the class with something entirely different.
    with test_lib.Stubber(rdfvalue, "RDFURN", None):
      # We now should not be able to restore the state normally since we can not
      # find the RDFURN instance.
      result = rdfvalue.FlowState(serialized)
      # The pickle error should be available here.
      self.assertTrue(isinstance(result.errors, TypeError))
      # The bad field should be replaced with an UnknownObject instance.
      self.assertTrue(isinstance(result.urn, flows.UnknownObject))
    # Missing attribute is a different kind of error, but this is still
    # trapped.
    del rdfvalue.RDFURN
    result = rdfvalue.FlowState(serialized)
    self.assertTrue(isinstance(result.errors, AttributeError))
    self.assertTrue(isinstance(result.urn, flows.UnknownObject))
| spnow/grr | lib/rdfvalues/flows_test.py | Python | apache-2.0 | 2,307 |
"""Graphical time series visualizer and analyzer."""
__version__ = '2021.08.08'
__all__ = [
'algorithms',
'dialogs',
'exporter',
'legend',
'mainui',
'puplot',
'tsplot',
'utils',
'types',
'ui',
'transformations',
]
| jaj42/dyngraph | graphysio/__init__.py | Python | isc | 260 |
from unittest import TestCase
from diycrate.cache_utils import redis_key
class CacheUtilTests(TestCase):
    def test_redis_key(self):
        # All cache keys must live under the versioned diy_crate namespace.
        self.assertTrue(redis_key("hello").startswith("diy_crate.version."))
| jheld/diycrate | tests/test.py | Python | mit | 215 |
from __main__ import vtk, qt, ctk, slicer
import string
import numpy
import math
import operator
import collections
from functools import reduce
class MorphologyStatistics:
  """3-D shape (morphology) features of a labeled region.

  Feature names map to expression strings that EvaluateFeatures() lazily
  evaluates, so only user-selected features (plus the Volume and Surface
  Area prerequisites, which other features reference) are computed.

  :param labelNodeSpacing: (x, y, z) voxel spacing in mm of the label node.
  :param matrixSA: 3-D array holding the label volume (assumed padded so
      that voxel neighbors at +/-1 are always in bounds -- TODO confirm
      against caller).
  :param matrixSACoordinates: tuple of index arrays (as from numpy.where)
      addressing the segmented voxels inside matrixSA.
  :param matrixSAValues: values of matrixSA at those coordinates.
  :param allKeys: feature names selected by the user.
  """

  def __init__(self, labelNodeSpacing, matrixSA, matrixSACoordinates, matrixSAValues, allKeys):
    # Ordered mapping: feature name -> expression string evaluated later.
    self.morphologyStatistics = collections.OrderedDict()
    self.morphologyStatisticsTiming = collections.OrderedDict()
    self.morphologyStatistics["Volume mm^3"] = 'self.volumeMM3(self.matrixSAValues, self.cubicMMPerVoxel)'
    self.morphologyStatistics[
      "Volume cc"] = 'self.volumeCC(self.matrixSAValues, self.cubicMMPerVoxel, self.ccPerCubicMM)'
    self.morphologyStatistics[
      "Surface Area mm^2"] = 'self.surfaceArea(self.matrixSA, self.matrixSACoordinates, self.matrixSAValues, self.labelNodeSpacing)'
    self.morphologyStatistics[
      "Surface:Volume Ratio"] = 'self.surfaceVolumeRatio(self.morphologyStatistics["Surface Area mm^2"], self.morphologyStatistics["Volume mm^3"])'
    self.morphologyStatistics[
      "Compactness 1"] = 'self.compactness1(self.morphologyStatistics["Surface Area mm^2"], self.morphologyStatistics["Volume mm^3"])'
    self.morphologyStatistics[
      "Compactness 2"] = 'self.compactness2(self.morphologyStatistics["Surface Area mm^2"], self.morphologyStatistics["Volume mm^3"])'
    self.morphologyStatistics[
      "Maximum 3D Diameter"] = 'self.maximum3DDiameter(self.labelNodeSpacing, self.matrixSA, self.matrixSACoordinates)'
    self.morphologyStatistics[
      "Spherical Disproportion"] = 'self.sphericalDisproportion(self.morphologyStatistics["Surface Area mm^2"], self.morphologyStatistics["Volume mm^3"])'
    self.morphologyStatistics[
      "Sphericity"] = 'self.sphericityValue(self.morphologyStatistics["Surface Area mm^2"], self.morphologyStatistics["Volume mm^3"])'
    # Only evaluate features the caller actually asked for.
    self.keys = set(allKeys).intersection(list(self.morphologyStatistics.keys()))
    self.labelNodeSpacing = labelNodeSpacing
    self.matrixSA = matrixSA
    self.matrixSACoordinates = matrixSACoordinates
    self.matrixSAValues = matrixSAValues
    # volume of a single voxel in mm^3 (product of the three spacings)
    self.cubicMMPerVoxel = reduce(lambda x, y: x * y, self.labelNodeSpacing)
    self.ccPerCubicMM = 0.001

  def volumeMM3(self, matrixSA, cubicMMPerVoxel):
    """Segmented volume in mm^3 (voxel count times voxel volume)."""
    return (matrixSA.size * cubicMMPerVoxel)

  def volumeCC(self, matrixSA, cubicMMPerVoxel, ccPerCubicMM):
    """Segmented volume in cubic centimeters."""
    return (matrixSA.size * cubicMMPerVoxel * ccPerCubicMM)

  def surfaceArea(self, a, matrixSACoordinates, matrixSAValues, labelNodeSpacing):
    """Surface area in mm^2: for every segmented voxel, the faces whose
    6-neighbor is background contribute their area to the total."""
    x, y, z = labelNodeSpacing
    xz = x * z
    yz = y * z
    xy = x * y
    # in matrixSACoordinates
    #   i corresponds to height (z)
    #   j corresponds to vertical (y)
    #   k corresponds to horizontal (x)
    surfaceArea = 0
    for voxel in range(0, matrixSAValues.size):
      i, j, k = matrixSACoordinates[0][voxel], matrixSACoordinates[1][voxel], matrixSACoordinates[2][voxel]
      fxy = (numpy.array([a[i + 1, j, k], a[i - 1, j, k]]) == 0)  # evaluate to 1 if true, 0 if false
      fyz = (numpy.array([a[i, j + 1, k], a[i, j - 1, k]]) == 0)  # evaluate to 1 if true, 0 if false
      fxz = (numpy.array([a[i, j, k + 1], a[i, j, k - 1]]) == 0)  # evaluate to 1 if true, 0 if false
      surfaceArea += (numpy.sum(fxz) * xz) + (numpy.sum(fyz) * yz) + (numpy.sum(fxy) * xy)
    return (surfaceArea)

  def surfaceVolumeRatio(self, surfaceArea, volumeMM3):
    """Surface area divided by volume (1/mm)."""
    return (surfaceArea / volumeMM3)

  def compactness1(self, surfaceArea, volumeMM3):
    """Compactness 1: V / (A^(3/2) * sqrt(pi)) family measure of how
    compact the shape is relative to a sphere."""
    return ((volumeMM3) / ((surfaceArea) ** (2 / 3.0) * math.sqrt(math.pi)))

  def compactness2(self, surfaceArea, volumeMM3):
    """Compactness 2: 36*pi*V^2 / A^3 (1.0 for a perfect sphere)."""
    return ((36 * math.pi) * ((volumeMM3) ** 2) / ((surfaceArea) ** 3))

  def maximum3DDiameter(self, labelNodeSpacing, matrixSA, matrixSACoordinates):
    """Largest pairwise Euclidean distance (mm) between voxels on the
    bounding extremes of the segmentation surface."""
    x, y, z = labelNodeSpacing
    minBounds = numpy.array(
      [numpy.min(matrixSACoordinates[0]), numpy.min(matrixSACoordinates[1]), numpy.min(matrixSACoordinates[2])])
    maxBounds = numpy.array(
      [numpy.max(matrixSACoordinates[0]), numpy.max(matrixSACoordinates[1]), numpy.max(matrixSACoordinates[2])])
    a = numpy.array(list(zip(*matrixSACoordinates)))
    # voxels lying on the min/max bounding planes, scaled to mm
    edgeVoxelsMinCoords = numpy.vstack(
      [a[a[:, 0] == minBounds[0]], a[a[:, 1] == minBounds[1]], a[a[:, 2] == minBounds[2]]]) * [z, y, x]
    edgeVoxelsMaxCoords = numpy.vstack(
      [(a[a[:, 0] == maxBounds[0]] + 1), (a[a[:, 1] == maxBounds[1]] + 1), (a[a[:, 2] == maxBounds[2]] + 1)]) * [
      z, y, x]
    maxDiameter = 1
    for voxel1 in edgeVoxelsMaxCoords:
      for voxel2 in edgeVoxelsMinCoords:
        voxelDistance = numpy.sqrt(numpy.sum((voxel2 - voxel1) ** 2))
        if voxelDistance > maxDiameter:
          maxDiameter = voxelDistance
    return (maxDiameter)

  def sphericalDisproportion(self, surfaceArea, volumeMM3):
    """Ratio of the surface area to that of a sphere with the same volume
    (the reciprocal of sphericity).

    Bug fix: the equivalent-sphere radius is (3V / 4pi)**(1/3); the
    previous code applied the cube root to pi only, so the returned
    value was not 1/sphericity as the definition requires.
    """
    R = ((3.0 * volumeMM3) / (4.0 * math.pi)) ** (1.0 / 3.0)
    return ((surfaceArea) / (4 * math.pi * (R ** 2)))

  def sphericityValue(self, surfaceArea, volumeMM3):
    """Sphericity: pi^(1/3) * (6V)^(2/3) / A (1.0 for a perfect sphere)."""
    return (((math.pi) ** (1 / 3.0) * (6 * volumeMM3) ** (2 / 3.0)) / (surfaceArea))

  def EvaluateFeatures(self, printTiming=False, checkStopProcessFunction=None):
    """Evaluate the expression strings for all user-selected features.

    :param printTiming: when True, also collect per-feature wall-clock
        times and return (features, timings).
    :param checkStopProcessFunction: optional callable invoked between
        feature evaluations so a GUI can abort long computations.
    :return: the feature OrderedDict (unselected keys set to None), or a
        (features, timings) tuple when printTiming is True.
    """
    if not self.keys:
      # NOTE(review): returns only the dict even when printTiming is set;
      # callers expecting a tuple here should verify this corner case.
      return self.morphologyStatistics
    if len(self.matrixSA) == 0:
      # Empty segmentation: every selected feature is defined as 0.
      for key in self.keys:
        self.morphologyStatistics[key] = 0
      # Bug fix: this branch previously fell off the end and returned
      # None; return the same shapes as the non-empty path.
      if printTiming:
        return self.morphologyStatistics, self.morphologyStatisticsTiming
      return self.morphologyStatistics
    # Volume and Surface Area are pre-calculated even if only one
    # morphology metric is user-selected, since other features reference
    # them through their expression strings.
    if printTiming:
      import time
      t1 = time.time()
    self.morphologyStatistics["Volume mm^3"] = eval(self.morphologyStatistics["Volume mm^3"])
    self.keys.add("Volume mm^3")
    if printTiming:
      self.morphologyStatisticsTiming["Volume mm^3"] = time.time() - t1
      # Bug fix: start the timer *before* computing Surface Area (the old
      # code reset t1 only after the eval, always recording ~0 seconds).
      t1 = time.time()
    self.morphologyStatistics["Surface Area mm^2"] = eval(self.morphologyStatistics["Surface Area mm^2"])
    self.keys.add("Surface Area mm^2")
    if printTiming:
      self.morphologyStatisticsTiming["Surface Area mm^2"] = time.time() - t1
    # Remove all the keys that must not be evaluated
    for key in set(self.morphologyStatistics.keys()).difference(self.keys):
      self.morphologyStatistics[key] = None
    if printTiming:
      for key in self.keys:
        if isinstance(self.morphologyStatistics[key], str):
          t1 = time.time()
          self.morphologyStatistics[key] = eval(self.morphologyStatistics[key])
          self.morphologyStatisticsTiming[key] = time.time() - t1
          if checkStopProcessFunction is not None:
            checkStopProcessFunction()
      return self.morphologyStatistics, self.morphologyStatisticsTiming
    else:
      for key in self.keys:
        if isinstance(self.morphologyStatistics[key], str):
          self.morphologyStatistics[key] = eval(self.morphologyStatistics[key])
          if checkStopProcessFunction is not None:
            checkStopProcessFunction()
      return self.morphologyStatistics
| acil-bwh/SlicerCIP | Scripted/CIP_LesionModel/FeatureExtractionLib/MorphologyStatistics.py | Python | bsd-3-clause | 7,867 |
from ...types import serializable
from ...util import none_or
from ..errors import MalformedXML
from .redirect import Redirect
from .revision import Revision
class Page(serializable.Type):
    """
    Page meta data and a :class:`~mw.xml_dump.Revision` iterator. Instances of
    this class can be called as iterators directly. E.g.
    .. code-block:: python
        page = mw.xml_dump.Page( ... )
        for revision in page:
            print("{0} {1}".format(revision.id, page_id))
    """
    __slots__ = (
        'id',
        'title',
        'namespace',
        'redirect',
        'restrictions'
    )
    def __init__(self, id, title, namespace, redirect, restrictions, revisions=None):
        self.id = none_or(id, int)
        """
        Page ID : `int`
        """
        self.title = none_or(title, str)
        """
        Page title (namespace excluded) : `str`
        """
        self.namespace = none_or(namespace, int)
        """
        Namespace ID : `int`
        """
        self.redirect = none_or(redirect, Redirect)
        """
        Page is currently redirect? : :class:`~mw.xml_dump.Redirect` | `None`
        """
        self.restrictions = serializable.List.deserialize(restrictions)
        """
        A list of page editing restrictions (empty unless restrictions are specified) : list( `str` )
        """
        # Should be a lazy generator
        self.__revisions = revisions or []
    def __iter__(self):
        # The revision generator is single-pass: iterating a Page twice
        # will not replay revisions.
        return self.__revisions
    def __next__(self):
        return next(self.__revisions)
    @classmethod
    def load_revisions(cls, first_revision, element):
        # Lazily yields the revision consumed during metadata parsing
        # followed by all remaining <revision> children of <page>.
        yield Revision.from_element(first_revision)
        for sub_element in element:
            tag = sub_element.tag
            if tag == "revision":
                yield Revision.from_element(sub_element)
            else:
                raise MalformedXML("Expected to see 'revision'. " +
                                   "Instead saw '{0}'".format(tag))
    @classmethod
    def from_element(cls, element):
        # Builds a Page from a <page> XML element, consuming metadata
        # children until the first <revision> is reached.
        title = None
        namespace = None
        id = None
        redirect = None
        restrictions = []
        first_revision = None
        # Consume each of the elements until we see <id> which should come last.
        for sub_element in element:
            tag = sub_element.tag
            if tag == "title":
                title = sub_element.text
            elif tag == "ns":
                namespace = sub_element.text
            elif tag == "id":
                id = int(sub_element.text)
            elif tag == "redirect":
                redirect = Redirect.from_element(sub_element)
            elif tag == "restrictions":
                restrictions.append(sub_element.text)
            elif tag == "revision":
                first_revision = sub_element
                break
            # Assuming that the first revision seen marks the end of page
            # metadata.  I'm not too keen on this assumption, so I'm leaving
            # this long comment to warn whoever ends up maintaining this.
            else:
                raise MalformedXML("Unexpected tag found when processing " +
                                   "a <page>: '{0}'".format(tag))
        # Assuming that I got here by seeing a <revision> tag.  See verbose
        # comment above.
        revisions = cls.load_revisions(first_revision, element)
        return cls(id, title, namespace, redirect, restrictions, revisions)
| makoshark/Mediawiki-Utilities | mw/xml_dump/iteration/page.py | Python | mit | 3,501 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import itertools as it
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cross_validation import train_test_split, KFold
from sklearn.metrics import confusion_matrix
from score import calc_score
from toolbox import transform_to_submission_format, get_feature_importances
from random import choice
import xgboost as xgb
plt.style.use('ggplot')
""" Recovering the data """
print 'Recovering the data'
store = pd.HDFStore('../Data/enhanced_learning_restricted_data.h5')
data = store['data_users']
data = data.fillna(0)
store.close()
# Deleting a part of the data
#base_size = 30000
#data_country_7 = data[data['country_destination'] == 7]
#_, rows_to_delete = train_test_split(data_country_7, train_size=base_size, random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
#
#data_country_10 = data[data['country_destination'] == 10]
#_, rows_to_delete = train_test_split(data_country_10, train_size=int(base_size/1.5), random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
#
#data_country_11 = data[data['country_destination'] == 11]
#_, rows_to_delete = train_test_split(data_country_11, train_size=base_size/8, random_state=1)
#data = data.drop(rows_to_delete.index, axis=0)
data_learn, data_test = train_test_split(data, test_size=0.3, random_state=2)
data_learn, data_valid = train_test_split(data, test_size=0.7, random_state=2)
X = data
X = X.drop('country_destination', axis=1)
y = data['country_destination']
X_learn = data_learn
X_learn = X_learn.drop('country_destination', axis=1)
y_learn = pd.DataFrame(index=X_learn.index)
y_learn['country'] = data_learn['country_destination']
X_valid = data_valid
X_valid = X_valid.drop('country_destination', axis=1)
y_valid = pd.DataFrame(index=X_valid.index)
y_valid['country'] = data_valid['country_destination']
X_test = data_test
X_test = X_test.drop('country_destination', axis=1)
y_test = pd.DataFrame(index=X_test.index)
y_test['country'] = data_test['country_destination']
kf = KFold(len(data), n_folds=3, random_state=1)
""" Learning """
print 'Learning'
""" #### Test: model parameters #### """
#test_name='model_parameters'
#criterion_choices = ['gini', 'entropy']
#n_estimators_choices = range(1, 750)
#min_samples_split_choices = range(10, 5000)
#max_depth_choices = range(2, 50)
#min_samples_leaf_choices = range(10, 5000)
#
#n_experiments = 1000
#
#criterion_exp = []
#n_estimators_exp = []
#min_samples_split_exp = []
#min_samples_leaf_exp = []
#max_depth_exp = []
#scores = []
#
#for n_experiment in range(n_experiments):
# criterion_exp.append(choice(criterion_choices))
# n_estimators_exp.append(choice(n_estimators_choices))
# min_samples_split_exp.append(choice(max_depth_choices))
# min_samples_leaf_exp.append(choice(min_samples_leaf_choices))
# max_depth_exp.append(choice(max_depth_choices))
#
# classif = RandomForestClassifier(n_estimators=n_estimators_exp[-1],
# criterion=criterion_exp[-1],
# random_state=0,
# min_samples_split=min_samples_split_exp[-1],
# max_depth=max_depth_exp[-1],
# min_samples_leaf=min_samples_leaf_exp[-1],
# n_jobs=-1)
#
# classif.fit(X_learn, y_learn)
#
# """ Converting the proba into 5 best guesses """
# proba_countries = classif.predict_proba(X_valid)
# find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
# best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
# predictions = pd.DataFrame(best_guesses, index=y_valid.index)
#
# print '--------------------'
# print 'criterion = %s' % criterion_exp[-1]
# print 'min_samples_split = %s' % min_samples_split_exp[-1]
# print 'max_depth = %s' % max_depth_exp[-1]
# print 'min_samples_leaf = %s' % min_samples_leaf_exp[-1]
# scores.append(calc_score(predictions, y_valid))
# print 'Score = %s' % scores[-1]
#
# if n_experiment % 20 == 0 and n_experiment > 0:
# data_score = pd.DataFrame({'Criterion': criterion_exp,
# 'n_estimators': n_estimators_exp,
# 'min_samples_split': min_samples_split_exp,
# 'max_depth': max_depth_exp,
# 'min_samples_leaf': min_samples_leaf_exp,
# 'score': scores})
#
# data_score.to_csv('../Lab/%s.csv' % test_name)
""" #### Test: number of features #### """
#test_name='number_features'
#scores = []
#
#classif_base = RandomForestClassifier(n_estimators=186,
# criterion='entropy',
# random_state=0,
# min_samples_split=30,
# max_depth=16,
# min_samples_leaf=11,
# n_jobs=-1)
#classif_base.fit(X_learn, y_learn)
#
#fi = [(name, value) for (name,value) in zip(X_learn.columns.values.tolist(),
# classif_base.feature_importances_)]
#fi = sorted(fi, key=lambda x: x[1], reverse=True)
#features = [f[0] for f in fi]
#features_to_keep = features[:200]
#
#""" Plotting figure importances """
##fi_ = [x[1] for x in fi]
##plt.bar(range(len(fi_)), fi_)
##print features[:10]
##plt.show()
#
#with open('../Data/features_to_keep.dt', 'w') as f:
# pickle.dump(features_to_keep, f)
#
#for n_features in range(1, len(features)):
# classif = RandomForestClassifier(**classif_base.get_params())
#
# X_learn_ = X_learn[features[:n_features]]
# X_valid_ = X_valid[features[:n_features]]
# classif.fit(X_learn_, y_learn)
#
# """ Converting the proba into 5 best guesses """
# proba_countries = classif.predict_proba(X_valid_)
# find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
# best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
# predictions = pd.DataFrame(best_guesses, index=y_valid.index)
#
# print '--------------------'
# print 'n_features = %s' % n_features
# scores.append(calc_score(predictions, y_valid))
# print 'Score = %s' % scores[-1]
#
# if n_features % 5 == 0:
# data_score = pd.DataFrame({'n_features': range(n_features),
# 'score': scores})
#
# data_score.to_csv('../Lab/%s.csv' % test_name)
""" Test: simple test """
#with open('../Data/features_to_keep.dt', 'r') as f:
# features_to_keep = pickle.load(f)
scores = []
#for train,test in kf:
for _ in range(1):
#X_learn, X_valid, y_learn, y_valid = X.iloc[train], X.iloc[test], \
# y.iloc[train], y.iloc[test]
#y_valid = pd.DataFrame({'country': y_valid})
#y_test = pd.DataFrame({'country': y_test})
""" RANDOM FOREST """
classif_base = RandomForestClassifier(n_estimators=300,
criterion='entropy',
random_state=0,
min_samples_split=1000,
max_depth=10,
min_samples_leaf=100,
n_jobs=-1)
classif = RandomForestClassifier(**classif_base.get_params())
""" GRADIENT BOOSTING """
#classif_base = GradientBoostingClassifier(loss='deviance',
# learning_rate=0.25,
# n_estimators=20,
# max_depth=5,
# min_samples_split=50,
# min_samples_leaf=100,
# random_state=0,
# verbose=True)
#classif = GradientBoostingClassifier(**classif_base.get_params())
""" XGBOOST """
xg_train = xgb.DMatrix(X_learn, label=y_learn)
xg_valid = xgb.DMatrix(X_valid, label=y_valid)
xg_test = xgb.DMatrix(X_test, label=y_test)
param = {}
param['objective'] = 'multi:softprob'
param['eta'] = 0.3
param['gamma'] = 5.
param['max_depth'] = 6
param['learning_rate'] = 0.1
param['subsample'] = 1.
param['colsample_bytree'] = 1.
param['min_child_weight'] = 100
param['silent'] = 1
param['nthread'] = 4
param['num_class'] = 12
watchlist = [ (xg_train, 'train'), (xg_valid, 'test') ]
num_boost_round = 20
early_stopping_rounds = 5
classif = xgb.train(param, xg_train, early_stopping_rounds=early_stopping_rounds,
num_boost_round=num_boost_round,
evals=watchlist)
#proba_countries = classif.predict( xg_test, ntree_limit=classif.best_ntree_limit )
#classif.fit(X_learn, y_learn)
""" Converting the proba into 5 best guesses """
#proba_countries = classif.predict_proba(X_valid_)
def score(X, y):
X = xgb.DMatrix(X, label=y)
proba_countries = classif.predict( X, ntree_limit=classif.best_ntree_limit )
#proba_countries = classif.predict_proba(X)
find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
predictions = pd.DataFrame(best_guesses, index=y.index)
print calc_score(predictions, y)
score(X_learn, y_learn)
score(X_valid, y_valid)
score(X_test, y_test)
#print np.array(get_feature_importances(classif, X_learn_)[:20])
#import pdb; pdb.set_trace()
#miss_rows = predictions[predictions[0] != y_valid['country']]
#miss_rows = pd.concat([y_valid.loc[miss_rows.index], miss_rows], axis=1)
#confmat = confusion_matrix(miss_rows.iloc[:,0], miss_rows.iloc[:,1])
#miss_rows_710 = miss_rows[(miss_rows['country']==10) & (miss_rows[0]==7)]
#import pdb; pdb.set_trace()
#print '----------------------'
#print 'Mean score = %s' % np.mean(scores)
#print 'Std score = %s' % np.std(scores)
| RomainSabathe/kaggle_airbnb2015 | Code/lab.py | Python | mit | 10,237 |
from PyQt4 import QtGui
from utils import *
import pyqtgraph as pqg
import numpy as np
from colorButton import ColorButton
import traceitem
class ImageItem():
    """A single image (2-D slice of an n-D data source) shown in an imageTab.

    Owns the slicing parameters, the axis assignment (x / y / time / color),
    the GUI widgets that edit them, and any isocurves drawn over the image.
    """

    def __init__(self, imageTab):
        self.imageTab = imageTab          # tab hosting the plot and tables
        self.sliceTable = None            # QTableWidget editing sliceParams
        self.name = None                  # display name; NOTE: shadows the name() method
        self.sliceParams = {}             # arg name -> index or (start, stop) bounds
        self.ds = None                    # data source
        self.valName = None               # value in self.ds being imaged
        self.sliceSpins = {}              # arg name -> SpinBox or (min, max) SpinBoxes
        self.axis = {}                    # 'x'/'y'/'t'/'c' -> arg index (or None)
        self.axisNames = {}               # 'x'/'y'/'t'/'c' -> arg name (or 'None')
        self.gradient = None
        self.levels = None
        self.time = None
        self.rgb = False
        self.isocurves = []               # IsocurveItems drawn over the image
        self.isoLevelMin = float('inf')   # lowest isocurve level currently present
        self.isoLevelMax = float('-inf')  # highest isocurve level currently present
        self.isoGradient = self.imageTab.gradient_isocurve
        self.savedGradient = None
        self.isoWidth = 3                 # default isocurve pen width
        self.displayIso = True
        self.isoIndex = 0                 # monotonically increasing isocurve id
        self.updateSlice = True           # guard against re-entrant update_slice

    def copy_data(self, other):
        """Initialize this item from another ImageItem or a TraceItem.

        Copying from a TraceItem promotes the 1-D trace to a 2-D image by
        pairing the trace's argument axis with the next argument of the value.
        Raises GraphException if the value has fewer than two dimensions.
        """
        if isinstance(other, ImageItem):
            self.ds = other.ds
            self.valName = other.valName
            self.sliceParams = other.sliceParams.copy()
            self.axis = other.axis.copy()
            self.axisNames = other.axisNames.copy()
            self.gradient = other.gradient
            self.levels = other.levels
            self.time = other.time
            self.rgb = other.rgb
            self.isoLevelMin = other.isoLevelMin
            self.isoLevelMax = other.isoLevelMax
            self.isoWidth = other.isoWidth
            self.displayIso = other.displayIso
            for curve in other.isocurves:
                width = curve.width
                color = curve.color
                level = curve.level
                self.add_isocurve(level, color, width)
        elif isinstance(other, traceitem.TraceItem):
            self.ds = other.ds
            self.valName = other.valName
            args = self.ds.get_args_to_val(self.valName)
            if len(args) < 2:
                self.delete()
                raise GraphException("{} does not have two dimensions to plot.".format(self.valName))
            xIndex = args.index(other.argName)
            # Pick the next argument (wrapping around) as the y axis.
            yIndex = (xIndex + 1) % len(args)
            self.axis = {'x': xIndex, 'y': yIndex, 't': None, 'c': None}
            self.axisNames = {'x': args[xIndex], 'y': args[yIndex], 't': 'None', 'c': 'None'}
            self.sliceParams = other.sliceParams.copy()
            self.sliceParams[args[yIndex]] = (0, self.ds.get_arg_shape(args[yIndex]))

    def name(self):
        # NOTE(review): __init__ assigns self.name = None, so on every instance
        # this method is shadowed by the attribute and is effectively dead code.
        return self.name

    def setName(self, newName):
        """Set SELF's name to NEWNAME."""
        newName = str(newName)
        if newName == self.name:
            return
        # The tab may rewrite the name to keep it unique; use what it returns.
        validName = self.imageTab.rename(self, newName)
        self.name = validName
        self.item_name.setText(validName)

    def set_axis(self, xAxis, yAxis, tAxis='None', cAxis='None'):
        """Assign data-source arguments to the x / y / time / color axes.

        Resets the slicing parameters, rebuilds the slice table and redraws
        the image. Raises GraphException on duplicate or unknown axis names.
        """
        # With both optional axes disabled, only three distinct names are
        # expected in the set ('None' collapses into one entry).
        correctLength = 4
        if tAxis == 'None' and cAxis == 'None':
            correctLength = 3
        if len(set([xAxis, yAxis, tAxis, cAxis])) != correctLength:
            message = "Duplicates selected. Won't update image until proper selection."
            raise GraphException(message)
        args = self.ds.get_args_to_val(self.valName)
        if xAxis not in args:
            raise GraphException("{} is not in {}".format(xAxis, self.ds.name))
        if yAxis not in args:
            raise GraphException("{} is not in {}".format(yAxis, self.ds.name))
        if tAxis != 'None' and tAxis not in args:
            raise GraphException("{} is not in {}".format(tAxis, self.ds.name))
        if cAxis != 'None' and cAxis not in args:
            raise GraphException("{} is not in {}".format(cAxis, self.ds.name))
        xIndex = args.index(xAxis)
        yIndex = args.index(yAxis)
        tIndex = None
        cIndex = None
        if tAxis != 'None':
            tIndex = args.index(tAxis)
        if cAxis != 'None':
            cIndex = args.index(cAxis)
        self.axis = {'x': xIndex, 'y': yIndex, 't': tIndex, 'c': cIndex}
        self.axisNames = {'x': xAxis, 'y': yAxis, 't': tAxis, 'c': cAxis}
        # Non-axis arguments default to a single index 0; axis arguments get
        # full-range (start, stop) bounds.
        for arg in args:
            self.sliceParams[arg] = 0
        self.sliceParams[xAxis] = (0, self.ds.get_arg_shape(xAxis))
        self.sliceParams[yAxis] = (0, self.ds.get_arg_shape(yAxis))
        if tAxis != 'None':
            self.sliceParams[tAxis] = (0, self.ds.get_arg_shape(tAxis))
        self.rgb = False
        if cAxis != 'None':
            self.sliceParams[cAxis] = (0, self.ds.get_arg_shape(cAxis))
            # RGB data renders with a neutral grey lookup gradient.
            gradient = self.imageTab.plot.getHistogramWidget().gradient
            gradient.loadPreset('grey')
            self.rgb = True
        self.update_slice_table(self.imageTab.table_slice)
        self.display_image()

    def set_data_source(self, ds, valName):
        """Point this image at value VALNAME of data source DS.

        Defaults the first two arguments to the x and y axes. Raises
        GraphException when the value has fewer than two dimensions.
        """
        self.ds = ds
        self.valName = valName
        self.sliceParams.clear()
        args = ds.get_args_to_val(valName)
        for arg in args:
            self.sliceParams[arg] = 0
        if len(args) < 2:
            raise GraphException("Not enough dimensions to create an image.")
        else:
            self.set_axis(args[0], args[1])

    def display_image(self):
        """Load the currently selected slice and push it into the plot."""
        image = self.imageTab.plot
        tVals = None
        if self.axis['t'] is not None:
            # Time axis values feed the plot's time slider.
            args = self.ds.get_args_to_val(self.valName)
            tArg = args[self.axis['t']]
            tSlice = slice(*self.sliceParams[tArg])
            tVals = self.ds.load_arg(tArg, tSlice)
        axis = self._convert_axis()
        s = self.ds.gen_slice(self.valName, self.sliceParams)
        data = self.ds.load_val(self.valName, s)
        data, axisDict = self.swap_axis(data, axis)
        image.setImage(data, levels=self.levels,
                       axes=axisDict, xvals=tVals)

    def swap_axis(self, data, axisDict):
        """Shuffles the axis in DATA around until they are
        in t, x, y, c order. With the t, x ,y, c axis specified by
        AXISDICT. Returns the new data, as well as the new axisDict."""
        if axisDict['t'] is not None:
            # Roll the time axis to position 0; every axis that sat before it
            # shifts right by one, so the bookkeeping below keeps axisDict
            # consistent with the rolled array.
            tIndex = axisDict['t']
            for k, v in axisDict.iteritems():
                if v is None:
                    continue
                if v < tIndex:
                    axisDict[k] += 1
            data = np.rollaxis(data, tIndex, 0)
            # Then x to position 1 ...
            xIndex = axisDict['x']
            for k, v in axisDict.iteritems():
                if v is None:
                    continue
                if 0 < v < xIndex:
                    axisDict[k] += 1
            data = np.rollaxis(data, xIndex, 1)
            # ... and y to position 2; color (if any) ends up last.
            yIndex = axisDict['y']
            for k, v in axisDict.iteritems():
                if v is None:
                    continue
                if 1 < v < yIndex:
                    axisDict[k] += 1
            data = np.rollaxis(data, yIndex, 2)
            cIndex = None
            if axisDict['c'] is not None:
                cIndex = 3
            return data, {'t': 0, 'x': 1, 'y': 2, 'c': cIndex}
        else:
            # No time axis: target layout is x, y[, c].
            xIndex = axisDict['x']
            for k, v in axisDict.iteritems():
                if v is None:
                    continue
                if v < xIndex:
                    axisDict[k] += 1
            data = np.rollaxis(data, xIndex, 0)
            yIndex = axisDict['y']
            for k, v in axisDict.iteritems():
                if v is None:
                    continue
                if 0 < v < yIndex:
                    axisDict[k] += 1
            data = np.rollaxis(data, yIndex, 1)
            cIndex = None
            if axisDict['c'] is not None:
                cIndex = 3
            return data, {'x': 0, 'y': 1, 't': None, 'c': cIndex}

    def toggleUpdate(self):
        # Wired to the "update" checkbox in add_to_image_table but never
        # implemented.
        raise NotImplementedError()

    def delete(self):
        """Delete this imageItem, and all associated Isocurves and data."""
        # BUG FIX: iterate over a copy -- delete_iso() pops from
        # self.isocurves, and mutating the list while iterating it would
        # silently skip every other curve.
        for curve in list(self.isocurves):
            self.delete_iso(curve)
        self.imageTab.delete_image(self)
        del self.imageTab

    def add_to_image_table(self, imageTable):
        """Append a row (name / update checkbox / delete button) for this
        image to IMAGETABLE."""
        table = imageTable
        row = table.rowCount()
        table.setRowCount(row + 1)
        self.item_name = QtGui.QTableWidgetItem(self.name)
        self.item_name.image = self
        table.setItem(row, 0, self.item_name)
        self.checkBoxUpdate = QtGui.QCheckBox()
        self.checkBoxUpdate.setChecked(False)
        self.checkBoxUpdate.stateChanged.connect(self.toggleUpdate)
        table.setCellWidget(row, 1, self.checkBoxUpdate)
        btn_delete = QtGui.QPushButton('Delete')
        btn_delete.clicked.connect(self.delete)
        table.setCellWidget(row, 2, btn_delete)
        table.resizeColumnsToContents()
        table.horizontalHeader().setStretchLastSection(True)

    def update_slice_table(self, sliceTable):
        """Rebuild SLICETABLE: one row per argument, with range spin boxes for
        axis arguments and a single index spin box for the others."""
        if self.valName is None:
            sliceTable.clear()
            sliceTable.setHorizontalHeaderLabels(['axis', 'indices', 'values'])
            sliceTable.resizeColumnsToContents()
            sliceTable.horizontalHeader().setStretchLastSection(True)
            return
        self.sliceTable = sliceTable
        sliceTable.clear()
        sliceTable.setHorizontalHeaderLabels(['axis', 'indices', 'values'])
        sliceTable.setRowCount(len(self.sliceParams))
        args = self.ds.get_args_to_val(self.valName)
        axisArgs = []
        for i in self.axis.values():
            if i is not None:
                axisArgs += [args[i]]
        row = 0
        # Axis arguments first: each gets a (min, max) pair of spin boxes.
        for argName in axisArgs:
            argItem = QtGui.QTableWidgetItem(argName)
            sliceTable.setItem(row, 0, argItem)
            # min may go up to shape-2 and max down to 1 so the two spins can
            # never cross (update_slice also enforces min < max).
            minBounds = (0, self.ds.get_arg_shape(argName) - 2)
            maxBounds = (1, self.ds.get_arg_shape(argName) - 1)
            spin_sliceMin = pqg.SpinBox(bounds=minBounds, step=1.0, int=True)
            spin_sliceMin.setValue(self.sliceParams[argName][0])
            spin_sliceMax = pqg.SpinBox(bounds=maxBounds, step=1.0, int=True)
            spin_sliceMax.setValue(self.sliceParams[argName][1])
            self.sliceSpins[argName] = (spin_sliceMin, spin_sliceMax)
            spin_sliceMin.sigValueChanged.connect(self.update_slice)
            spin_sliceMax.sigValueChanged.connect(self.update_slice)
            spinLayout = QtGui.QHBoxLayout()
            spinLayout.addWidget(spin_sliceMin)
            spinLayout.addWidget(spin_sliceMax)
            cellWidget = QtGui.QWidget()
            cellWidget.setLayout(spinLayout)
            sliceTable.setCellWidget(row, 1, cellWidget)
            sliceTable.setCellWidget(row, 2, QtGui.QLabel())
            row += 1
        # Remaining arguments get a single index spin box.
        for argName, s in self.sliceParams.iteritems():
            if argName in axisArgs:
                continue
            argItem = QtGui.QTableWidgetItem(argName)
            sliceTable.setItem(row, 0, argItem)
            bounds = (0, self.ds.get_arg_shape(argName) - 1)
            spin = pqg.SpinBox(bounds=bounds, step=1.0, int=True)
            spin.setValue(self.sliceParams[argName])
            spin.sigValueChanged.connect(self.update_slice)
            self.sliceSpins[argName] = spin
            sliceTable.setCellWidget(row, 1, spin)
            sliceTable.setCellWidget(row, 2, QtGui.QLabel())
            row += 1
        self.update_slice_labels()
        sliceTable.resizeColumnsToContents()
        sliceTable.resizeRowsToContents()
        sliceTable.horizontalHeader().setStretchLastSection(True)

    def update_slice_labels(self):
        """Refresh the 'values' column with the data values corresponding to
        the currently selected indices."""
        for row in range(self.sliceTable.rowCount()):
            argName = str(self.sliceTable.item(row, 0).text())
            s = self.sliceParams[argName]
            if isinstance(s, tuple):
                # Range slice: show the inclusive [first, last] data values.
                valString = '[' + str(self.ds.load_arg(argName, s[0])) + ', '
                valString += str(self.ds.load_arg(argName, s[1] - 1)) + ']'
            else:
                valString = str(self.ds.load_arg(argName, s))
            label = self.sliceTable.cellWidget(row, 2)
            label.setText(valString)

    def update_slice(self):
        """Read all spin boxes back into sliceParams and redraw the image,
        preserving the plot's current display levels."""
        # Re-entrancy guard: setMaximum/setMinimum below re-fire
        # sigValueChanged, which would recurse into this slot.
        if not self.updateSlice:
            return
        self.updateSlice = False
        for argName, spins in self.sliceSpins.iteritems():
            if isinstance(spins, tuple):
                sliceMin = spins[0]
                sliceMax = spins[1]
                # Keep the pair ordered: min strictly below max.
                sliceMin.setMaximum(sliceMax.value() - 1)
                sliceMax.setMinimum(sliceMin.value() + 1)
                sMin = sliceMin.value()
                sMax = sliceMax.value() + 1
                self.sliceParams[argName] = (sMin, sMax)
            else:
                self.sliceParams[argName] = spins.value()
        self.update_slice_labels()
        levels = self.imageTab.plot.get_levels()
        self.display_image()
        self.imageTab.plot.setLevels(*levels)
        self.updateSlice = True

    def _convert_axis(self):
        """Return a copy of self.axis with the indices renumbered to a dense
        0..n range (None entries stay None), ordered by original index."""
        axis = []
        for k, v in self.axis.iteritems():
            axis += [[k, v]]
        # NOTE: sorted() places None-valued entries first under Python 2.
        axis = sorted(axis, key=lambda item: item[1])
        axisDict = {}
        index = 0
        for i in range(len(axis)):
            if axis[i][1] is None:
                axisDict[axis[i][0]] = None
            else:
                axisDict[axis[i][0]] = index
                index += 1
        return axisDict

    def new_isocurve(self, table):
        """Create a new isocurve."""
        width = self.isoWidth
        level = 0
        if len(self.isocurves) == 0:
            # First curve sits mid-gradient; establish a [0, 1] level range.
            color = self.isoGradient.getColor(0.5)
            isocurve = self.add_isocurve(level, color, width)
            self.add_isocurve_to_table(isocurve)
            self.isoLevelMin = 0
            self.isoLevelMax = 1
        elif len(self.isocurves) == 1:
            # Second curve: the lower of the two takes the gradient's start
            # color, the higher its end color.
            other = self.isocurves[0]
            if other.level > level:
                otherColor = self.isoGradient.getColor(1.0)
                other.setPen(otherColor, width=other.width)
                color = self.isoGradient.getColor(0.0)
                self.isoLevelMin = level
                self.isoLevelMax = other.level
            if other.level < level:
                otherColor = self.isoGradient.getColor(0.0)
                other.setPen(otherColor, width=other.width)
                color = self.isoGradient.getColor(1.0)
                self.isoLevelMin = other.level
                self.isoLevelMax = level
            else:
                # Equal levels: bump the new curve up one to keep a nonzero span.
                level += 1
                otherColor = self.isoGradient.getColor(0.0)
                other.setPen(otherColor, width=other.width)
                color = self.isoGradient.getColor(1.0)
                self.isoLevelMin = other.level
                self.isoLevelMax = level
            isocurve = self.add_isocurve(level, color, width)
            self.add_isocurve_to_table(isocurve)
        else:
            if level < self.isoLevelMin:
                self.isoLevelMin = level
                color = self.isoGradient.getColor(0.0)
                isocurve = self.add_isocurve(level, color, width)
                self.add_isocurve_to_table(isocurve)
                self.update_iso_colors()
            if level > self.isoLevelMax:
                self.isoLevelMax = level
                color = self.isoGradient.getColor(1.0)
                isocurve = self.add_isocurve(level, color, width)
                self.add_isocurve_to_table(isocurve)
                self.update_iso_colors()
            else:
                # BUG FIX: interpolate the color from the level's position in
                # [min, max] -- the original computed x and then passed a
                # hard-coded 1.0 to getColor. float() also guards against
                # Python 2 integer division truncating x to 0.
                x = float(level - self.isoLevelMin)
                x /= (self.isoLevelMax - self.isoLevelMin)
                color = self.isoGradient.getColor(x)
                isocurve = self.add_isocurve(level, color, width)
                self.add_isocurve_to_table(isocurve)
        if self.displayIso:
            self.display_isocurve(isocurve)

    def add_isocurve(self, level, color, width):
        """Create and return a new isocurve."""
        pen = pqg.mkPen(color, width=width)
        curve = pqg.IsocurveItem(None, level, pen)
        # Stash display state on the curve so table callbacks can read it back.
        curve.width = width
        curve.color = color
        # Companion markers on the horizontal/vertical slice plots.
        curve.horzLine = pqg.InfiniteLine(pos=level, angle=0, pen=pen)
        curve.vertLine = pqg.InfiniteLine(pos=level, angle=90, pen=pen)
        curve.id = self.isoIndex
        self.isoIndex += 1
        self.isocurves += [curve]
        return curve

    def add_isocurve_to_table(self, curve):
        """Add the isocurve CURVE to self.imageTab.table_isocurve."""
        table = self.imageTab.table_isocurve
        row = table.rowCount()
        table.setRowCount(row + 1)
        spin = pqg.SpinBox(value=curve.level, step=.1, dec=True, minStep=.1)
        setLevel = lambda: self.set_iso_level(curve, spin)
        spin.sigValueChanging.connect(setLevel)
        table.setCellWidget(row, 0, spin)
        curve.spinLevel = spin
        cButton = ColorButton(color=curve.color)
        setColor = lambda: self.set_iso_color(curve, cButton)
        cButton.sigColorChanging.connect(setColor)
        table.setCellWidget(row, 1, cButton)
        curve.colorBtn = cButton
        spin2 = pqg.SpinBox(value=curve.width, step=.1, dec=True, minStep=.1)
        setWidth = lambda: self.set_iso_width(curve, spin2)
        spin2.sigValueChanging.connect(setWidth)
        table.setCellWidget(row, 2, spin2)
        curve.spinWidth = spin2
        btnDelete = QtGui.QPushButton('delete')
        btnDelete.id = curve.id
        deleteIso = lambda: self.delete_iso(curve)
        btnDelete.clicked.connect(deleteIso)
        table.setCellWidget(row, 3, btnDelete)

    def delete_iso(self, curve):
        """Remove CURVE from the plot, the slice plots, the isocurve table and
        self.isocurves, then recompute the level bounds and recolor the rest."""
        curve.setData(None)
        curve.setParentItem(None)
        self.imageTab.plot.ui.plot_slice_horiz.removeItem(curve.horzLine)
        self.imageTab.plot.ui.plot_slice_vert.removeItem(curve.vertLine)
        # Locate the curve in our list by its unique id.
        index = -1
        for i in range(len(self.isocurves)):
            c = self.isocurves[i]
            if c.id == curve.id:
                index = i
        if index == -1:
            raise Exception("Curve not found")
        self.isocurves.pop(index)
        # Locate and remove the matching table row (keyed by the delete
        # button's id attribute).
        table = self.imageTab.table_isocurve
        index = -1
        for row in range(table.rowCount()):
            btn = table.cellWidget(row, 3)
            if btn.id == curve.id:
                index = row
        if index == -1:
            raise Exception("Curve not found")
        self.imageTab.table_isocurve.removeRow(index)
        if len(self.isocurves) == 0:
            # No curves left: reset bounds to their sentinel values.
            self.isoLevelMin = float('inf')
            self.isoLevelMax = float('-inf')
        elif curve.level in [self.isoLevelMin, self.isoLevelMax]:
            # Removing an extreme curve shrinks the range; recolor the rest.
            levels = [c.level for c in self.isocurves]
            self.isoLevelMin = min(levels)
            self.isoLevelMax = max(levels)
            self.update_iso_colors()
        del curve

    def set_iso_level(self, curve, spin):
        """Move CURVE to the level in SPIN, updating its slice-plot markers,
        the tracked min/max bounds and the interpolated color."""
        level = spin.value()
        prevLevel = curve.level
        curve.setLevel(level)
        curve.horzLine.setValue(level)
        curve.vertLine.setValue(level)
        # If the curve was pinned at an extreme and moved inward (and no other
        # curve still sits at that extreme), shrink the bounds.
        if prevLevel == self.isoLevelMax and level < prevLevel:
            levels = [iso.level for iso in self.isocurves]
            if prevLevel not in levels:
                self.isoLevelMax = level
                self.update_iso_colors()
        if prevLevel == self.isoLevelMin and level > prevLevel:
            levels = [iso.level for iso in self.isocurves]
            if prevLevel not in levels:
                self.isoLevelMin = level
                self.update_iso_colors()
        # Moving past an extreme grows the bounds.
        if level > self.isoLevelMax:
            self.isoLevelMax = level
            self.update_iso_colors()
        if level < self.isoLevelMin:
            self.isoLevelMin = level
            self.update_iso_colors()
        if self.isoLevelMin < level < self.isoLevelMax:
            # float() guards against Python 2 integer division truncating x.
            x = float(level - self.isoLevelMin)
            x /= (self.isoLevelMax - self.isoLevelMin)
            color = self.isoGradient.getColor(x)
            curve.colorBtn.setColor(color)
            curve.color = color
            curve.setPen(color, width=curve.width)
            curve.horzLine.setPen(curve.color, width=curve.width)
            curve.vertLine.setPen(curve.color, width=curve.width)

    def set_iso_width(self, curve, spin):
        """Apply the pen width in SPIN to CURVE and its slice-plot markers."""
        curve.width = spin.value()
        curve.setPen(curve.color, width=curve.width)
        curve.horzLine.setPen(curve.color, width=curve.width)
        curve.vertLine.setPen(curve.color, width=curve.width)

    def set_iso_color(self, curve, colorBtn):
        """Apply the color from COLORBTN to CURVE and its slice-plot markers."""
        color = colorBtn.color()
        curve.color = color
        curve.setPen(color, width=curve.width)
        curve.horzLine.setPen(curve.color, width=curve.width)
        curve.vertLine.setPen(curve.color, width=curve.width)

    def update_iso_colors(self):
        """Update all the isocurve colors to match the new gradient or
        new bounds."""
        if self.isoLevelMin == self.isoLevelMax:
            # Avoid dividing by zero below when all curves share one level.
            self.isoLevelMin -= 1
        for i in range(len(self.isocurves)):
            curve = self.isocurves[i]
            # float() guards against Python 2 integer division truncating x.
            x = float(curve.level - self.isoLevelMin)
            x /= (self.isoLevelMax - self.isoLevelMin)
            color = self.isoGradient.getColor(x)
            curve.colorBtn.setColor(color)
            curve.color = color
            curve.setPen(color, width=curve.width)
            curve.horzLine.setPen(curve.color, width=curve.width)
            curve.vertLine.setPen(curve.color, width=curve.width)

    def update_iso_widths(self):
        """Set every isocurve's width from the tab's width spin box (via each
        curve's own spin box, so set_iso_width fires per curve)."""
        width = self.imageTab.spin_width.value()
        for curve in self.isocurves:
            curve.spinWidth.setValue(width)

    def display_isocurve(self, curve):
        """Display the curve on the current plot."""
        data = self.imageTab.plot.imageItem.image
        curve.setParentItem(self.imageTab.plot.imageItem)
        curve.setData(data)
        self.imageTab.plot.ui.plot_slice_horiz.addItem(curve.horzLine)
        self.imageTab.plot.ui.plot_slice_vert.addItem(curve.vertLine)

    def update_iso_data(self):
        """Refresh every displayed isocurve against the current image data."""
        if self.displayIso is False:
            return
        data = self.imageTab.plot.imageItem.image
        for curve in self.isocurves:
            curve.setData(data)

    def init_isocurves(self):
        """Re-register existing isocurves in the table (and plot, if shown);
        used after this image becomes the active one in the tab."""
        for curve in self.isocurves:
            self.add_isocurve_to_table(curve)
        if self.displayIso:
            self.display_isocurves()

    def remove_isocurves(self):
        """Detach every isocurve and its markers from the plots without
        deleting them (they can be re-shown via display_isocurves)."""
        for curve in self.isocurves:
            curve.setData(None)
            curve.setParentItem(None)
            self.imageTab.plot.ui.plot_slice_horiz.removeItem(curve.horzLine)
            self.imageTab.plot.ui.plot_slice_vert.removeItem(curve.vertLine)

    def display_isocurves(self):
        """Show every isocurve on the current plot."""
        for curve in self.isocurves:
            self.display_isocurve(curve)
| bencorbett90/Graph | graph/imageitem.py | Python | gpl-2.0 | 22,662 |
"""Implicit module which returns token-expiry time from Flask-security."""
from flask_security import views
from werkzeug.datastructures import MultiDict
from flask import jsonify, after_this_request
from flask_security.utils import login_user
def _render_json(app,
                 form,
                 include_user=True,
                 include_auth_token=False,
                 include_token_expiry=False):
    """Build a Flask-Security style JSON response for a submitted FORM.

    On validation errors the payload carries the errors under code 400;
    otherwise (code 200) it optionally embeds the user id, the auth token and
    the configured token max-age from APP's config.
    """
    if form.errors:
        status = 400
        payload = {'errors': form.errors}
    else:
        status = 200
        payload = {}
        if include_user:
            payload['user'] = {'id': str(form.user.id)}
        if include_auth_token:
            # NOTE(review): assumes include_user is also set; otherwise this
            # raises KeyError on the missing 'user' entry.
            payload['user']['authentication_token'] = form.user.get_auth_token()
        max_age_key = 'SECURITY_TOKEN_MAX_AGE'
        if include_token_expiry and max_age_key in app.config:
            payload['user']['token_age'] = app.config[max_age_key]
    return jsonify(dict(meta=dict(code=status), response=payload))
def _commit(response=None):
    """Flush pending datastore writes, then hand RESPONSE back unchanged
    (shape expected by Flask's after_this_request hook)."""
    views._datastore.commit()
    return response
def login_with_token(request, app):
    """Return token on a login-request for a given app.

    Validates the security login form from REQUEST, logs the user in on
    success, and for JSON requests returns a response carrying both the
    authentication token and its expiry; non-JSON requests return None.
    """
    form_class = views._security.login_form
    form = form_class(MultiDict(request.json)) if request.json else form_class()
    if form.validate_on_submit():
        login_user(form.user, remember=form.remember.data)
        # Persist the (possibly updated) user record once the response is built.
        after_this_request(_commit)
    if request.json:
        return _render_json(app,
                            form,
                            include_auth_token=True,
                            include_token_expiry=True)
| nitred/no_imagination | server/flask_app/app_utils/token_login.py | Python | mit | 1,717 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
"""
Utilities for streaming from several file-like data storages: S3 / HDFS / standard
filesystem / compressed files..., using a single, Pythonic API.
The streaming makes heavy use of generators and pipes, to avoid loading
full file contents into memory, allowing work with arbitrarily large files.
The main methods are:
* `smart_open()`, which opens the given file for reading/writing
* `s3_iter_bucket()`, which goes over all keys in an S3 bucket in parallel
"""
import logging
import os
import subprocess
import sys
import requests
import io
IS_PY2 = (sys.version_info[0] == 2)
if IS_PY2:
import httplib
elif sys.version_info[0] == 3:
import http.client as httplib
from boto.compat import BytesIO, urlsplit, six
import boto.s3.connection
import boto.s3.key
from ssl import SSLError
logger = logging.getLogger(__name__)
# Multiprocessing is unavailable in App Engine (and possibly other sandboxes).
# The only method currently relying on it is s3_iter_bucket, which is instructed
# whether to use it by the MULTIPROCESSING flag.
MULTIPROCESSING = False
try:
import multiprocessing.pool
MULTIPROCESSING = True
except ImportError:
logger.warning("multiprocessing could not be imported and won't be used")
from itertools import imap
from . import gzipstreamfile
S3_MIN_PART_SIZE = 50 * 1024**2 # minimum part size for S3 multipart uploads
WEBHDFS_MIN_PART_SIZE = 50 * 1024**2 # minimum part size for HDFS multipart uploads
def smart_open(uri, mode="rb", **kw):
"""
Open the given S3 / HDFS / filesystem file pointed to by `uri` for reading or writing.
The only supported modes for now are 'rb' (read, default) and 'wb' (replace & write).
The reads/writes are memory efficient (streamed) and therefore suitable for
arbitrarily large files.
The `uri` can be either:
1. a URI for the local filesystem (compressed ``.gz`` or ``.bz2`` files handled automatically):
`./lines.txt`, `/home/joe/lines.txt.gz`, `file:///home/joe/lines.txt.bz2`
2. a URI for HDFS: `hdfs:///some/path/lines.txt`
3. a URI for Amazon's S3 (can also supply credentials inside the URI):
`s3://my_bucket/lines.txt`, `s3://my_aws_key_id:key_secret@my_bucket/lines.txt`
4. an instance of the boto.s3.key.Key class.
Examples::
>>> # stream lines from http; you can use context managers too:
>>> with smart_open.smart_open('http://www.google.com') as fin:
... for line in fin:
... print line
>>> # stream lines from S3; you can use context managers too:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt') as fin:
... for line in fin:
... print line
>>> # you can also use a boto.s3.key.Key instance directly:
>>> key = boto.connect_s3().get_bucket("my_bucket").get_key("my_key")
>>> with smart_open.smart_open(key) as fin:
... for line in fin:
... print line
>>> # stream line-by-line from an HDFS file
>>> for line in smart_open.smart_open('hdfs:///user/hadoop/my_file.txt'):
... print line
>>> # stream content *into* S3:
>>> with smart_open.smart_open('s3://mybucket/mykey.txt', 'wb') as fout:
... for line in ['first line', 'second line', 'third line']:
... fout.write(line + '\n')
>>> # stream from/to (compressed) local files:
>>> for line in smart_open.smart_open('/home/radim/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('/home/radim/my_file.txt.gz'):
... print line
>>> with smart_open.smart_open('/home/radim/my_file.txt.gz', 'wb') as fout:
... fout.write("hello world!\n")
>>> with smart_open.smart_open('/home/radim/another.txt.bz2', 'wb') as fout:
... fout.write("good bye!\n")
>>> # stream from/to (compressed) local files with Expand ~ and ~user constructions:
>>> for line in smart_open.smart_open('~/my_file.txt'):
... print line
>>> for line in smart_open.smart_open('my_file.txt'):
... print line
"""
# validate mode parameter
if not isinstance(mode, six.string_types):
raise TypeError('mode should be a string')
if isinstance(uri, six.string_types):
# this method just routes the request to classes handling the specific storage
# schemes, depending on the URI protocol in `uri`
parsed_uri = ParseUri(uri)
if parsed_uri.scheme in ("file", ):
# local files -- both read & write supported
# compression, if any, is determined by the filename extension (.gz, .bz2)
return file_smart_open(parsed_uri.uri_path, mode)
elif parsed_uri.scheme in ("s3", "s3n", "s3u"):
kwargs = {}
# Get an S3 host. It is required for sigv4 operations.
host = kw.pop('host', parsed_uri.host)
port = kw.pop('port', parsed_uri.port)
if port != 443:
kwargs['port'] = port
if not kw.pop('is_secure', parsed_uri.scheme != 's3u'):
kwargs['is_secure'] = False
# If the security model docker is overridden, honor the host directly.
kwargs['calling_format'] = boto.s3.connection.OrdinaryCallingFormat()
# For credential order of precedence see
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html#credentials
s3_connection = boto.connect_s3(
aws_access_key_id=parsed_uri.access_id,
host=host,
aws_secret_access_key=parsed_uri.access_secret,
profile_name=kw.pop('profile_name', None),
**kwargs)
bucket = s3_connection.get_bucket(parsed_uri.bucket_id)
if mode in ('r', 'rb'):
key = bucket.get_key(parsed_uri.key_id)
if key is None:
raise KeyError(parsed_uri.key_id)
return S3OpenRead(key)
elif mode in ('w', 'wb'):
key = bucket.get_key(parsed_uri.key_id, validate=False)
if key is None:
raise KeyError(parsed_uri.key_id)
return S3OpenWrite(key, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
elif parsed_uri.scheme in ("hdfs", ):
if mode in ('r', 'rb'):
return HdfsOpenRead(parsed_uri, **kw)
if mode in ('w', 'wb'):
return HdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
elif parsed_uri.scheme in ("webhdfs", ):
if mode in ('r', 'rb'):
return WebHdfsOpenRead(parsed_uri, **kw)
elif mode in ('w', 'wb'):
return WebHdfsOpenWrite(parsed_uri, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
elif parsed_uri.scheme.startswith('http'):
if mode in ('r', 'rb'):
return HttpOpenRead(parsed_uri, **kw)
else:
raise NotImplementedError("file mode %s not supported for %r scheme", mode, parsed_uri.scheme)
else:
raise NotImplementedError("scheme %r is not supported", parsed_uri.scheme)
elif isinstance(uri, boto.s3.key.Key):
# handle case where we are given an S3 key directly
if mode in ('r', 'rb'):
return S3OpenRead(uri)
elif mode in ('w', 'wb'):
return S3OpenWrite(uri, **kw)
elif hasattr(uri, 'read'):
# simply pass-through if already a file-like
return uri
else:
raise TypeError('don\'t know how to handle uri %s' % repr(uri))
class ParseUri(object):
    """
    Parse the given URI.
    Supported URI schemes are "file", "s3", "s3n", "s3u" and "hdfs".
    * s3 and s3n are treated the same way.
    * s3u is s3 but without SSL.
    Valid URI examples::
    * s3://my_bucket/my_key
    * s3://my_key:my_secret@my_bucket/my_key
    * s3://my_key:my_secret@my_server:my_port@my_bucket/my_key
    * hdfs:///path/file
    * hdfs://path/file
    * webhdfs://host:port/path/file
    * ./local/path/file
    * ~/local/path/file
    * local/path/file
    * ./local/path/file.gz
    * file:///home/user/file
    * file:///home/user/file.bz2
    """
    def __init__(self, uri, default_scheme="file"):
        """
        Assume `default_scheme` if no scheme given in `uri`.

        Populates `self.scheme` plus scheme-specific attributes:
        `uri_path` (file/hdfs/webhdfs/http), or `bucket_id`, `key_id`,
        `host`, `port`, `access_id`, `access_secret`,
        `ordinary_calling_format` (s3 variants).
        """
        if os.name == 'nt':
            # urlsplit doesn't work on Windows -- it parses the drive as the scheme...
            if '://' not in uri:
                # no protocol given => assume a local file
                uri = 'file://' + uri
        parsed_uri = urlsplit(uri, allow_fragments=False)
        self.scheme = parsed_uri.scheme if parsed_uri.scheme else default_scheme
        if self.scheme == "hdfs":
            # hdfs:///path and hdfs://path both normalize to an absolute /path.
            self.uri_path = parsed_uri.netloc + parsed_uri.path
            self.uri_path = "/" + self.uri_path.lstrip("/")
            if not self.uri_path:
                raise RuntimeError("invalid HDFS URI: %s" % uri)
        elif self.scheme == "webhdfs":
            # Rewrite host:port/path into the WebHDFS REST endpoint path.
            self.uri_path = parsed_uri.netloc + "/webhdfs/v1" + parsed_uri.path
            if parsed_uri.query:
                self.uri_path += "?" + parsed_uri.query
            if not self.uri_path:
                raise RuntimeError("invalid WebHDFS URI: %s" % uri)
        elif self.scheme in ("s3", "s3n", "s3u"):
            # Split on '@' to detect credentials/endpoint:
            #   1 part  -> s3://bucket/object
            #   2 parts -> s3://key:secret@bucket/object
            #   3 parts -> s3://key:secret@server[:port]@bucket/object
            # NOTE: self.bucket_id temporarily holds this list of parts and is
            # reassigned to the plain bucket name string in each branch below.
            self.bucket_id = (parsed_uri.netloc + parsed_uri.path).split('@')
            self.key_id = None
            self.port = 443
            self.host = boto.config.get('s3', 'host', 's3.amazonaws.com')
            self.ordinary_calling_format = False
            if len(self.bucket_id) == 1:
                # URI without credentials: s3://bucket/object
                self.bucket_id, self.key_id = self.bucket_id[0].split('/', 1)
                # "None" credentials are interpreted as "look for credentials in other locations" by boto
                self.access_id, self.access_secret = None, None
            elif len(self.bucket_id) == 2 and len(self.bucket_id[0].split(':')) == 2:
                # URI in full format: s3://key:secret@bucket/object
                # access key id: [A-Z0-9]{20}
                # secret access key: [A-Za-z0-9/+=]{40}
                acc, self.bucket_id = self.bucket_id
                self.access_id, self.access_secret = acc.split(':')
                self.bucket_id, self.key_id = self.bucket_id.split('/', 1)
            elif len(self.bucket_id) == 3 and len(self.bucket_id[0].split(':')) == 2:
                # or URI in extended format: s3://key:secret@server[:port]@bucket/object
                acc, server, self.bucket_id = self.bucket_id
                self.access_id, self.access_secret = acc.split(':')
                self.bucket_id, self.key_id = self.bucket_id.split('/', 1)
                server = server.split(':')
                # A custom endpoint implies path-style (ordinary) addressing.
                self.ordinary_calling_format = True
                self.host = server[0]
                if len(server) == 2:
                    self.port = int(server[1])
            else:
                # more than 2 '@' means invalid uri
                # Bucket names must be at least 3 and no more than 63 characters long.
                # Bucket names must be a series of one or more labels.
                # Adjacent labels are separated by a single period (.).
                # Bucket names can contain lowercase letters, numbers, and hyphens.
                # Each label must start and end with a lowercase letter or a number.
                raise RuntimeError("invalid S3 URI: %s" % uri)
        elif self.scheme == 'file':
            self.uri_path = parsed_uri.netloc + parsed_uri.path
            # '~/tmp' may be expanded to '/Users/username/tmp'
            self.uri_path = os.path.expanduser(self.uri_path)
            if not self.uri_path:
                raise RuntimeError("invalid file URI: %s" % uri)
        elif self.scheme.startswith('http'):
            # http(s): keep the original URI untouched; the HTTP reader uses it verbatim.
            self.uri_path = uri
        else:
            raise NotImplementedError("unknown URI scheme %r in %r" % (self.scheme, uri))
def is_gzip(name):
    """Tell whether *name* denotes a gzip-compressed file (``.gz`` suffix)."""
    gzip_suffix = ".gz"
    return name.endswith(gzip_suffix)
class S3ReadStreamInner(object):
    """Raw incremental reader over a boto S3 key (or any file-like stream).

    Buffers data read from the underlying stream in fixed-size chunks so
    that callers can do sized reads without pulling the whole object.
    """

    def __init__(self, stream):
        self.stream = stream
        self.unused_buffer = b''
        self.closed = False
        self.finished = False

    def read_until_eof(self):
        #
        # boto.s3.Key.read() reads the entire file at once, which isn't the
        # expected behavior (https://github.com/boto/boto/issues/3311), so
        # drain the stream ourselves in DEFAULT_BUFFER_SIZE chunks.
        #
        pieces = []
        while not self.finished:
            chunk = self.stream.read(io.DEFAULT_BUFFER_SIZE)
            if chunk:
                pieces.append(chunk)
            else:
                self.finished = True
        return b"".join(pieces)

    def read_from_buffer(self, size):
        """Remove at most *size* bytes from our buffer and return them."""
        taken, self.unused_buffer = self.unused_buffer[:size], self.unused_buffer[size:]
        return taken

    def read(self, size=None):
        # No positive size: return buffered data plus everything remaining.
        if not size or size < 0:
            return self.read_from_buffer(len(self.unused_buffer)) + self.read_until_eof()
        # Serve entirely from the buffer when possible.
        if len(self.unused_buffer) >= size:
            return self.read_from_buffer(size)
        # Stream exhausted: return whatever is left (possibly empty).
        if self.stream.closed or self.finished:
            self.finished = True
            return self.read_from_buffer(size)
        # Refill the buffer until we can satisfy the request or hit EOF.
        while len(self.unused_buffer) < size:
            chunk = self.stream.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                self.finished = True
                break
            self.unused_buffer += chunk
        return self.read_from_buffer(size)

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b*."""
        data = self.read(len(b))
        if not data:
            return None
        b[:len(data)] = data
        return len(data)

    def readable(self):
        # io.BufferedReader needs us to appear readable.
        return True

    def _checkReadable(self, msg=None):
        # Required to satisfy io.BufferedReader on Python 2.6; inheriting
        # from io.IOBase would also work but causes other problems.
        return True
class S3ReadStream(io.BufferedReader):
    """Buffered reader over :class:`S3ReadStreamInner` that behaves like a
    built-in file object rather than a raw io.BufferedReader."""

    def __init__(self, key):
        self.stream = S3ReadStreamInner(key)
        super(S3ReadStream, self).__init__(self.stream)

    def read(self, *args, **kwargs):
        #
        # io.BufferedReader behaves differently to a built-in file object:
        # in non-blocking mode with no bytes available it returns None, and
        # it may raise ValueError. Built-in files return an empty string in
        # both situations, which is what callers here expect.
        # TODO: what actually raises ValueError in the following code?
        #
        try:
            data = super(S3ReadStream, self).read(*args, **kwargs)
        except ValueError:
            return ''
        if data is None:
            return ""
        return data

    def readline(self, *args, **kwargs):
        # Same ValueError-to-empty-string smoothing as read().
        # TODO: what actually raises ValueError in the following code?
        try:
            return super(S3ReadStream, self).readline(*args, **kwargs)
        except ValueError:
            return ''
class S3OpenRead(object):
    """
    Implement streamed reader from S3, as an iterable & context manager.
    Supports reading from gzip-compressed files. Identifies such files by
    their extension.
    """
    def __init__(self, read_key):
        # NOTE(review): joined with ``and``, this raises only when the object
        # has *none* of the four attributes; an object with any one of them
        # slips through. Presumably ``or`` was intended -- confirm before
        # tightening, as callers may rely on duck-typed keys.
        if not hasattr(read_key, "bucket") and not hasattr(read_key, "name") and not hasattr(read_key, "read") \
                and not hasattr(read_key, "close"):
            raise TypeError("can only process S3 keys")
        self.read_key = read_key
        self._open_reader()

    def _open_reader(self):
        # Choose the reader based on the key's filename extension.
        if is_gzip(self.read_key.name):
            self.reader = gzipstreamfile.GzipStreamFile(self.read_key)
        else:
            self.reader = S3ReadStream(self.read_key)

    def __iter__(self):
        for line in self.reader:
            yield line

    def readline(self):
        return self.reader.readline()

    def read(self, size=None):
        """
        Read a specified number of bytes from the key.
        """
        return self.reader.read(size)

    def seek(self, offset, whence=0):
        """
        Seek to the specified position.
        Only seeking to the beginning (offset=0) supported for now.
        """
        if whence != 0 or offset != 0:
            raise NotImplementedError("seek other than offset=0 not implemented yet")
        # "Rewind" by closing the key and opening a fresh reader over it.
        self.read_key.close(fast=True)
        self._open_reader()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.read_key.close(fast=True)

    def __str__(self):
        return "%s<key: %s>" % (
            self.__class__.__name__, self.read_key
        )
class HdfsOpenRead(object):
    """
    Implement streamed reader from HDFS, as an iterable & context manager.

    Iteration shells out to the ``hdfs dfs -cat`` command-line tool and
    streams its stdout; sized read() and seek() are not implemented.
    """
    def __init__(self, parsed_uri):
        # BUG FIX: ``("hdfs")`` is not a tuple -- it is just the string
        # "hdfs", so ``scheme not in ("hdfs")`` performed a *substring* test
        # and wrongly accepted schemes such as "h" or "df". The one-element
        # tuple restores the intended exact-membership test.
        if parsed_uri.scheme not in ("hdfs",):
            raise TypeError("can only process HDFS files")
        self.parsed_uri = parsed_uri

    def __iter__(self):
        # The subprocess's stdout pipe serves directly as the line iterator.
        hdfs = subprocess.Popen(["hdfs", "dfs", "-cat", self.parsed_uri.uri_path], stdout=subprocess.PIPE)
        return hdfs.stdout

    def read(self, size=None):
        raise NotImplementedError("read() not implemented yet")

    def seek(self, offset, whence=None):
        raise NotImplementedError("seek() not implemented yet")

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        pass
class HdfsOpenWrite(object):
    """
    Implement streamed writer from HDFS, as an iterable & context manager.

    Writes are piped into an ``hdfs dfs -put -f -`` subprocess; closing the
    pipe's stdin finishes the upload.
    """
    def __init__(self, parsed_uri):
        # BUG FIX: ``("hdfs")`` is just the string "hdfs", so the original
        # check was a *substring* test that wrongly accepted schemes such as
        # "h" or "dfs". The one-element tuple restores exact membership.
        if parsed_uri.scheme not in ("hdfs",):
            raise TypeError("can only process HDFS files")
        self.parsed_uri = parsed_uri
        self.out_pipe = subprocess.Popen(["hdfs", "dfs", "-put", "-f", "-", self.parsed_uri.uri_path], stdin=subprocess.PIPE)

    def write(self, b):
        # Stream bytes straight into the hdfs subprocess.
        self.out_pipe.stdin.write(b)

    def seek(self, offset, whence=None):
        raise NotImplementedError("seek() not implemented yet")

    def __enter__(self):
        return self

    def close(self):
        # Closing stdin signals EOF to "hdfs dfs -put", completing the write.
        self.out_pipe.stdin.close()

    def __exit__(self, type, value, traceback):
        self.close()
class WebHdfsOpenRead(object):
    """
    Implement streamed reader from WebHDFS, as an iterable & context manager.
    NOTE: it does not support kerberos authentication yet

    read() and line iteration keep independent file positions: sized reads
    advance ``self.offset``, while iteration streams a separate request.
    """
    def __init__(self, parsed_uri):
        # BUG FIX: ``("webhdfs")`` is just the string "webhdfs", so the
        # original check was a *substring* test that wrongly accepted schemes
        # such as "web" or "hdfs". The one-element tuple restores the
        # intended exact-membership test.
        if parsed_uri.scheme not in ("webhdfs",):
            raise TypeError("can only process WebHDFS files")
        self.parsed_uri = parsed_uri
        self.offset = 0

    def __iter__(self):
        payload = {"op": "OPEN"}
        response = requests.get("http://" + self.parsed_uri.uri_path, params=payload, stream=True)
        return response.iter_lines()

    def read(self, size=None):
        """
        Read the specific number of bytes from the file
        Note read() and line iteration (`for line in self: ...`) each have their
        own file position, so they are independent. Doing a `read` will not affect
        the line iteration, and vice versa.
        """
        if not size or size < 0:
            # Read to EOF from the current offset, then rewind to the start.
            payload = {"op": "OPEN", "offset": self.offset}
            self.offset = 0
        else:
            payload = {"op": "OPEN", "offset": self.offset, "length": size}
            self.offset = self.offset + size
        response = requests.get("http://" + self.parsed_uri.uri_path, params=payload, stream=True)
        return response.content

    def seek(self, offset, whence=0):
        """
        Seek to the specified position.
        Only absolute seeking (whence=0) is supported.
        """
        if whence == 0 and offset == 0:
            self.offset = 0
        elif whence == 0:
            self.offset = offset
        else:
            raise NotImplementedError("operations with whence not implemented yet")

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        pass
def make_closing(base, **attrs):
    """
    Add support for `with Base(attrs) as fout:` to the base class if it's missing.
    The base class' `close()` method will be called on context exit, to always close the file properly.
    This is needed for gzip.GzipFile, bz2.BZ2File etc in older Pythons (<=2.6), which otherwise
    raise "AttributeError: GzipFile instance has no attribute '__exit__'".
    """
    # Only supply the context-manager hooks the base class doesn't already have.
    defaults = {
        '__enter__': lambda self: self,
        '__exit__': lambda self, type, value, traceback: self.close(),
    }
    for hook, impl in defaults.items():
        if not hasattr(base, hook):
            attrs[hook] = impl
    return type('Closing' + base.__name__, (base, object), attrs)
def compression_wrapper(file_obj, filename, mode):
    """
    Wrap *file_obj* with an appropriate [de]compression mechanism chosen
    from the extension of *filename*.

    *file_obj* must be a filehandle object or a class behaving like one.
    If the extension is not recognized, the original *file_obj* is
    returned unchanged.
    """
    extension = os.path.splitext(filename)[1]
    if extension == '.bz2':
        # bz2file backports the Python 3 BZ2File API to Python 2.
        if IS_PY2:
            from bz2file import BZ2File
        else:
            from bz2 import BZ2File
        return make_closing(BZ2File)(filename, mode)
    if extension == '.gz':
        from gzip import GzipFile
        return make_closing(GzipFile)(filename, mode)
    return file_obj
def file_smart_open(fname, mode='rb'):
    """
    Stream from/to the local filesystem, transparently (de)compressing
    gzip and bz2 files if necessary.
    """
    raw = open(fname, mode)
    return compression_wrapper(raw, fname, mode)
class HttpReadStream(object):
    """
    Implement streamed reader from a web site, as an iterable & context manager.
    Supports Kerberos and Basic HTTP authentication.
    As long as you don't mix different access patterns (readline vs readlines vs
    read(n) vs read() vs iteration) this will load efficiently in memory.
    """
    def __init__(self, url, mode='r', kerberos=False, user=None, password=None):
        """
        If Kerberos is True, will attempt to use the local Kerberos credentials.
        Otherwise, will try to use "basic" HTTP authentication via username/password.
        If none of those are set, will connect unauthenticated.
        """
        # (removed an unused `urlopen` import that the original dead-coded here)
        if kerberos:
            import requests_kerberos
            auth = requests_kerberos.HTTPKerberosAuth()
        elif user is not None and password is not None:
            auth = (user, password)
        else:
            auth = None
        self.response = requests.get(url, auth=auth, stream=True)
        if not self.response.ok:
            self.response.raise_for_status()
        self._read_buffer = None
        self._read_iter = None
        self._readline_iter = None

    def __iter__(self):
        return self.response.iter_lines()

    def readline(self):
        """
        Mimics the readline call to a filehandle object.
        """
        if self._readline_iter is None:
            self._readline_iter = self.response.iter_lines()
        try:
            return next(self._readline_iter)
        except StopIteration:
            # When the response ends, we raise EOFError, same as read() below.
            raise EOFError()

    def readlines(self):
        """
        Mimics the readlines call to a filehandle object.
        """
        return list(self.response.iter_lines())

    def read(self, size=None):
        """
        Mimics the read call to a filehandle object.
        """
        if size is None:
            return self.response.content
        if self._read_iter is None:
            self._read_iter = self.response.iter_content(size)
            # BUG FIX: the buffer must be *bytes* -- iter_content yields
            # bytes, and the original reset it to the str '' which caused a
            # TypeError on Python 3 after the first exhaustion. Starting it
            # empty (instead of calling next() eagerly) also turns an empty
            # response into a clean EOFError rather than a leaked
            # StopIteration.
            self._read_buffer = b''
        while len(self._read_buffer) < size:
            try:
                self._read_buffer += next(self._read_iter)
            except StopIteration:
                # Ran out of data early: hand back what remains, or EOF.
                retval = self._read_buffer
                self._read_buffer = b''
                if len(retval) == 0:
                    raise EOFError()
                return retval
        # Enough data buffered to satisfy the request in full.
        retval = self._read_buffer[:size]
        self._read_buffer = self._read_buffer[size:]
        return retval

    def __enter__(self, *args, **kwargs):
        return self

    def __exit__(self, *args, **kwargs):
        self.response.close()
def HttpOpenRead(parsed_uri, mode='r', **kwargs):
    """Open an http/https URI for streamed reading, wrapping the response in
    a decompressor chosen from the URL's trailing path component."""
    if parsed_uri.scheme not in ('http', 'https'):
        raise TypeError("can only process http/https urls")
    if mode not in ('r', 'rb'):
        raise NotImplementedError('Streaming write to http not supported')
    url = parsed_uri.uri_path
    stream = HttpReadStream(url, **kwargs)
    basename = url.split('/')[-1]
    return compression_wrapper(stream, basename, mode)
class S3OpenWrite(object):
    """
    Context manager for writing into S3 files.

    Bytes are buffered in memory and flushed as S3 multipart-upload parts,
    one part per `min_part_size` accumulated bytes.
    """
    def __init__(self, outkey, min_part_size=S3_MIN_PART_SIZE, **kw):
        """
        Streamed input is uploaded in chunks, as soon as `min_part_size` bytes are
        accumulated (50MB by default). The minimum chunk size allowed by AWS S3
        is 5MB.
        """
        # NOTE(review): joined with ``and``, this raises only when the object
        # has *neither* attribute; presumably ``or`` was intended -- confirm.
        if not hasattr(outkey, "bucket") and not hasattr(outkey, "name"):
            raise TypeError("can only process S3 keys")
        if is_gzip(outkey.name):
            raise NotImplementedError("streaming write to S3 gzip not supported")
        self.outkey = outkey
        self.min_part_size = min_part_size
        if min_part_size < 5 * 1024 ** 2:
            logger.warning("S3 requires minimum part size >= 5MB; multipart upload may fail")
        # initialize multipart upload
        self.mp = self.outkey.bucket.initiate_multipart_upload(self.outkey, **kw)
        # initialize stats
        self.lines = []        # pending (not yet uploaded) byte chunks
        self.total_size = 0    # bytes written over the object's lifetime
        self.chunk_bytes = 0   # bytes currently buffered in self.lines
        self.parts = 0         # number of parts uploaded so far

    def __str__(self):
        return "%s<key: %s, min_part_size: %s>" % (
            self.__class__.__name__, self.outkey, self.min_part_size,
        )

    def write(self, b):
        """
        Write the given bytes (binary string) into the S3 file from constructor.
        Note there's buffering happening under the covers, so this may not actually
        do any HTTP transfer right away.
        """
        if isinstance(b, six.text_type):
            # not part of API: also accept unicode => encode it as utf8
            b = b.encode('utf8')
        if not isinstance(b, six.binary_type):
            raise TypeError("input must be a binary string")
        self.lines.append(b)
        self.chunk_bytes += len(b)
        self.total_size += len(b)
        if self.chunk_bytes >= self.min_part_size:
            # Enough buffered: flush one multipart part (part numbers are 1-based).
            buff = b"".join(self.lines)
            logger.info("uploading part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
            self.mp.upload_part_from_file(BytesIO(buff), part_num=self.parts + 1)
            logger.debug("upload of part #%i finished" % self.parts)
            self.parts += 1
            self.lines, self.chunk_bytes = [], 0

    def seek(self, offset, whence=None):
        raise NotImplementedError("seek() not implemented yet")

    def close(self):
        # Flush whatever is still buffered as the final part.
        buff = b"".join(self.lines)
        if buff:
            logger.info("uploading last part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
            self.mp.upload_part_from_file(BytesIO(buff), part_num=self.parts + 1)
            logger.debug("upload of last part #%i finished" % self.parts)
        if self.total_size:
            self.mp.complete_upload()
        else:
            # AWS complains with "The XML you provided was not well-formed or did not validate against our published schema"
            # when the input is completely empty => abort the upload, no file created
            logger.info("empty input, ignoring multipart upload")
            self.outkey.bucket.cancel_multipart_upload(self.mp.key_name, self.mp.id)
            # So, instead, create an empty file like this
            logger.info("setting an empty value for the key")
            self.outkey.set_contents_from_string('')

    def __enter__(self):
        return self

    def _termination_error(self):
        # Called from __exit__ while exception info is active, so
        # logger.exception() can record the traceback.
        logger.exception("encountered error while terminating multipart upload; attempting cancel")
        self.outkey.bucket.cancel_multipart_upload(self.mp.key_name, self.mp.id)
        logger.info("cancel completed")

    def __exit__(self, type, value, traceback):
        if type is not None:
            # Body raised: cancel the upload but don't suppress the exception.
            self._termination_error()
            return False
        try:
            self.close()
        except:
            self._termination_error()
            raise
class WebHdfsOpenWrite(object):
    """
    Context manager for writing into webhdfs files.

    Bytes are buffered in memory and APPENDed to the remote file once
    `min_part_size` bytes accumulate. WebHDFS uses a two-step protocol:
    the namenode answers with a 307 redirect to a datanode, which then
    receives the actual payload.
    """
    def __init__(self, parsed_uri, min_part_size=WEBHDFS_MIN_PART_SIZE):
        # BUG FIX: ``("webhdfs")`` is just the string "webhdfs", so the
        # original check was a *substring* test that wrongly accepted schemes
        # such as "web" or "hdfs". The one-element tuple restores the
        # intended exact-membership test.
        if parsed_uri.scheme not in ("webhdfs",):
            raise TypeError("can only process WebHDFS files")
        self.parsed_uri = parsed_uri
        self.closed = False
        self.min_part_size = min_part_size
        # creating empty file first
        payload = {"op": "CREATE", "overwrite": True}
        init_response = requests.put("http://" + self.parsed_uri.uri_path, params=payload, allow_redirects=False)
        if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
            raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
        uri = init_response.headers['location']
        response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
        if not response.status_code == httplib.CREATED:
            raise WebHdfsException(str(response.status_code) + "\n" + response.content)
        self.lines = []        # pending (not yet uploaded) byte chunks
        self.parts = 0         # number of APPEND requests issued
        self.chunk_bytes = 0   # bytes currently buffered in self.lines
        self.total_size = 0    # bytes written overall

    def upload(self, data):
        """APPEND *data* to the remote file via the redirect handshake."""
        payload = {"op": "APPEND"}
        init_response = requests.post("http://" + self.parsed_uri.uri_path, params=payload, allow_redirects=False)
        if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
            raise WebHdfsException(str(init_response.status_code) + "\n" + init_response.content)
        uri = init_response.headers['location']
        response = requests.post(uri, data=data, headers={'content-type': 'application/octet-stream'})
        if not response.status_code == httplib.OK:
            raise WebHdfsException(str(response.status_code) + "\n" + response.content)

    def write(self, b):
        """
        Write the given bytes (binary string) into the WebHDFS file from constructor.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if isinstance(b, six.text_type):
            # not part of API: also accept unicode => encode it as utf8
            b = b.encode('utf8')
        if not isinstance(b, six.binary_type):
            raise TypeError("input must be a binary string")
        self.lines.append(b)
        self.chunk_bytes += len(b)
        self.total_size += len(b)
        if self.chunk_bytes >= self.min_part_size:
            # Enough buffered: flush one APPEND-sized part.
            buff = b"".join(self.lines)
            logger.info("uploading part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
            self.upload(buff)
            logger.debug("upload of part #%i finished" % self.parts)
            self.parts += 1
            self.lines, self.chunk_bytes = [], 0

    def seek(self, offset, whence=None):
        raise NotImplementedError("seek() not implemented yet")

    def close(self):
        # Flush whatever is still buffered, then refuse further writes.
        buff = b"".join(self.lines)
        if buff:
            logger.info("uploading last part #%i, %i bytes (total %.3fGB)" % (self.parts, len(buff), self.total_size / 1024.0 ** 3))
            self.upload(buff)
            logger.debug("upload of last part #%i finished" % self.parts)
        self.closed = True

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
def s3_iter_bucket_process_key(key, retries=3):
    """
    Conceptually part of `s3_iter_bucket`, but must remain a top-level
    function because of pickling visibility.

    Returns the ``(key, contents)`` pair for a single S3 key.
    """
    # Network hiccups on either side can surface as SSLError
    # (https://github.com/boto/boto/issues/2409); retry a few times in case
    # the failure is transient.
    last_attempt = retries
    for attempt in range(last_attempt + 1):
        try:
            return key, key.get_contents_as_string()
        except SSLError:
            # Re-raise only once we have exhausted all retries.
            if attempt == last_attempt:
                raise
def s3_iter_bucket(bucket, prefix='', accept_key=lambda key: True, key_limit=None, workers=16):
    """
    Iterate and download all S3 files under `bucket/prefix`, yielding out
    `(key, key content)` 2-tuples (generator).
    `accept_key` is a function that accepts a key name (unicode string) and
    returns True/False, signalling whether the given key should be downloaded out or
    not (default: accept all keys).
    If `key_limit` is given, stop after yielding out that many results.
    The keys are processed in parallel, using `workers` processes (default: 16),
    to speed up downloads greatly. If multiprocessing is not available, thus
    MULTIPROCESSING is False, this parameter will be ignored.
    Example::
    >>> mybucket = boto.connect_s3().get_bucket('mybucket')
    >>> # get all JSON files under "mybucket/foo/"
    >>> for key, content in s3_iter_bucket(mybucket, prefix='foo/', accept_key=lambda key: key.endswith('.json')):
    ...     print key, len(content)
    >>> # limit to 10k files, using 32 parallel workers (default is 16)
    >>> for key, content in s3_iter_bucket(mybucket, key_limit=10000, workers=32):
    ...     print key, len(content)
    """
    # key_no starts at -1 so the summary line reports 0 keys for an empty listing.
    total_size, key_no = 0, -1
    keys = (key for key in bucket.list(prefix=prefix) if accept_key(key.name))
    if MULTIPROCESSING:
        logger.info("iterating over keys from %s with %i workers" % (bucket, workers))
        pool = multiprocessing.pool.Pool(processes=workers)
        # imap_unordered yields results as soon as any worker finishes a key.
        iterator = pool.imap_unordered(s3_iter_bucket_process_key, keys)
    else:
        logger.info("iterating over keys from %s without multiprocessing" % bucket)
        iterator = imap(s3_iter_bucket_process_key, keys)
    for key_no, (key, content) in enumerate(iterator):
        if key_no % 1000 == 0:
            # Progress heartbeat every 1000 keys.
            logger.info("yielding key #%i: %s, size %i (total %.1fMB)" %
                        (key_no, key, len(content), total_size / 1024.0 ** 2))
        yield key, content
        key.close()
        total_size += len(content)
        if key_limit is not None and key_no + 1 >= key_limit:
            # we were asked to output only a limited number of keys => we're done
            break
    if MULTIPROCESSING:
        # terminate (not join): in-flight downloads are abandoned on early exit.
        pool.terminate()
    logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))
class WebHdfsException(Exception):
    """Raised when a WebHDFS REST call returns an unexpected status code."""

    def __init__(self, msg=str()):
        # Keep the message on the instance as well as in Exception.args.
        self.msg = msg
        super(WebHdfsException, self).__init__(self.msg)
| duyet-website/api.duyet.net | lib/smart_open/smart_open_lib.py | Python | mit | 36,873 |
"""
Support for Harmony Hub devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/remote.harmony/
"""
import logging
import asyncio
from os import path
import time
import voluptuous as vol
import homeassistant.components.remote as remote
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_STOP)
from homeassistant.components.remote import (
PLATFORM_SCHEMA, DOMAIN, ATTR_DEVICE, ATTR_ACTIVITY, ATTR_NUM_REPEATS,
ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
from homeassistant.util import slugify
from homeassistant.config import load_yaml_config_file
# pip dependency installed by Home Assistant when this platform loads.
REQUIREMENTS = ['pyharmony==1.0.18']

_LOGGER = logging.getLogger(__name__)

# Default XMPP port the Harmony hub listens on.
DEFAULT_PORT = 5222
# Module-global registry of all HarmonyRemote entities created so far.
DEVICES = []
# hass.data key caching manual configs until the matching hub is discovered.
CONF_DEVICE_CACHE = 'harmony_device_cache'
SERVICE_SYNC = 'harmony_sync'

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_NAME): cv.string,
    vol.Optional(CONF_HOST): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    # NOTE(review): vol.Required with default=None is unusual; presumably
    # vol.Optional was intended -- confirm against the voluptuous semantics.
    vol.Required(ATTR_ACTIVITY, default=None): cv.string,
    vol.Optional(ATTR_DELAY_SECS, default=DEFAULT_DELAY_SECS):
        vol.Coerce(float),
})

# Schema accepted by the harmony_sync service call.
HARMONY_SYNC_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Harmony platform.

    Handles two entry paths: discovery (``discovery_info`` set) and manual
    YAML configuration (``config``). Manual entries without a host are
    cached in hass.data so a later discovery of the same-named hub can pick
    up their options (activity, delay, port).
    """
    host = None
    activity = None
    if CONF_DEVICE_CACHE not in hass.data:
        hass.data[CONF_DEVICE_CACHE] = []
    if discovery_info:
        # Find the discovered device in the list of user configurations
        override = next((c for c in hass.data[CONF_DEVICE_CACHE]
                         if c.get(CONF_NAME) == discovery_info.get(CONF_NAME)),
                        False)
        port = DEFAULT_PORT
        delay_secs = DEFAULT_DELAY_SECS
        if override:
            # A cached manual entry matches this hub: honor its options.
            activity = override.get(ATTR_ACTIVITY)
            delay_secs = override.get(ATTR_DELAY_SECS)
            port = override.get(CONF_PORT, DEFAULT_PORT)
        host = (
            discovery_info.get(CONF_NAME),
            discovery_info.get(CONF_HOST),
            port)
        # Ignore hub name when checking if this hub is known - ip and port only
        # NOTE(review): ``host[1:]`` is an (address, port) tuple while
        # ``h.host`` holds just the address string (see HarmonyRemote.__init__),
        # so this membership test looks like it can never match -- confirm
        # that duplicate detection actually works.
        if host and host[1:] in (h.host for h in DEVICES):
            _LOGGER.debug("Discovered host already known: %s", host)
            return
    elif CONF_HOST in config:
        host = (
            config.get(CONF_NAME),
            config.get(CONF_HOST),
            config.get(CONF_PORT),
        )
        activity = config.get(ATTR_ACTIVITY)
        delay_secs = config.get(ATTR_DELAY_SECS)
    else:
        # No host known yet: remember this config for a future discovery.
        hass.data[CONF_DEVICE_CACHE].append(config)
        return
    name, address, port = host
    _LOGGER.info("Loading Harmony Platform: %s at %s:%s, startup activity: %s",
                 name, address, port, activity)
    # Per-hub config cache file, e.g. harmony_living_room.conf.
    harmony_conf_file = hass.config.path(
        '{}{}{}'.format('harmony_', slugify(name), '.conf'))
    try:
        device = HarmonyRemote(
            name, address, port, activity, harmony_conf_file, delay_secs)
        DEVICES.append(device)
        add_devices([device])
        register_services(hass)
    except ValueError:
        _LOGGER.warning("Failed to initialize remote: %s", name)
def register_services(hass):
    """Register all services for harmony devices."""
    service_descriptions = load_yaml_config_file(
        path.join(path.dirname(__file__), 'services.yaml'))
    hass.services.register(
        DOMAIN, SERVICE_SYNC, _sync_service,
        service_descriptions.get(SERVICE_SYNC),
        schema=HARMONY_SYNC_SCHEMA)
def _apply_service(service, service_func, *service_func_args):
    """Run *service_func* on every harmony device targeted by *service*."""
    target_ids = service.data.get('entity_id')
    if target_ids:
        selected = [dev for dev in DEVICES if dev.entity_id in target_ids]
    else:
        # No entity_id filter: apply to every known hub.
        selected = DEVICES
    for device in selected:
        service_func(device, *service_func_args)
        device.schedule_update_ha_state(True)
def _sync_service(service):
    """Handle the harmony_sync service call by syncing each targeted hub."""
    _apply_service(service, HarmonyRemote.sync)
class HarmonyRemote(remote.RemoteDevice):
    """Remote representation used to control a Harmony device."""

    def __init__(self, name, host, port, activity, out_path, delay_secs):
        """Initialize HarmonyRemote class.

        name: friendly hub name; host: hub address; port: XMPP port;
        activity: default activity used by turn_on; out_path: path of the
        on-disk hub configuration cache; delay_secs: default pause between
        successive commands.
        """
        import pyharmony
        from pathlib import Path
        _LOGGER.debug("HarmonyRemote device init started for: %s", name)
        self._name = name
        self.host = host
        self._port = port
        self._state = None
        self._current_activity = None
        self._default_activity = activity
        # new_activity is registered as the callback the client invokes when
        # the hub pushes an activity change notification.
        self._client = pyharmony.get_client(host, port, self.new_activity)
        self._config_path = out_path
        self._config = self._client.get_config()
        # Write the hub configuration cache only if it doesn't exist yet.
        if not Path(self._config_path).is_file():
            _LOGGER.debug("Writing harmony configuration to file: %s",
                          out_path)
            pyharmony.ha_write_config_file(self._config, self._config_path)
        self._delay_secs = delay_secs

    @asyncio.coroutine
    def async_added_to_hass(self):
        """Complete the initialization."""
        # Disconnect the XMPP client cleanly when Home Assistant stops.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_STOP,
            lambda event: self._client.disconnect(wait=True))
        # Poll for initial state
        self.new_activity(self._client.get_current_activity())

    @property
    def name(self):
        """Return the Harmony device's name."""
        return self._name

    @property
    def should_poll(self):
        """Return the fact that we should not be polled."""
        return False

    @property
    def device_state_attributes(self):
        """Add platform specific attributes."""
        return {'current_activity': self._current_activity}

    @property
    def is_on(self):
        """Return False if PowerOff is the current activity, otherwise True."""
        # NOTE(review): derived from _current_activity directly; the parallel
        # self._state flag is written elsewhere but never read here.
        return self._current_activity not in [None, 'PowerOff']

    def new_activity(self, activity_id):
        """Callback for updating the current activity."""
        import pyharmony
        activity_name = pyharmony.activity_name(self._config, activity_id)
        _LOGGER.debug("%s activity reported as: %s", self._name, activity_name)
        self._current_activity = activity_name
        self._state = bool(self._current_activity != 'PowerOff')
        self.schedule_update_ha_state()

    def turn_on(self, **kwargs):
        """Start an activity from the Harmony device."""
        import pyharmony
        # Fall back to the activity configured at setup time.
        activity = kwargs.get(ATTR_ACTIVITY, self._default_activity)
        if activity:
            activity_id = pyharmony.activity_id(self._config, activity)
            self._client.start_activity(activity_id)
            self._state = True
        else:
            _LOGGER.error("No activity specified with turn_on service")

    def turn_off(self, **kwargs):
        """Start the PowerOff activity."""
        self._client.power_off()

    def send_command(self, commands, **kwargs):
        """Send a list of commands to one device."""
        device = kwargs.get(ATTR_DEVICE)
        if device is None:
            _LOGGER.error("Missing required argument: device")
            return
        # NOTE(review): assumes the remote component's service schema always
        # supplies num_repeats; a missing value would make range() fail.
        num_repeats = kwargs.get(ATTR_NUM_REPEATS)
        delay_secs = kwargs.get(ATTR_DELAY_SECS, self._delay_secs)
        for _ in range(num_repeats):
            for command in commands:
                self._client.send_command(device, command)
                time.sleep(delay_secs)

    def sync(self):
        """Sync the Harmony device with the web service."""
        import pyharmony
        _LOGGER.debug("Syncing hub with Harmony servers")
        self._client.sync()
        self._config = self._client.get_config()
        _LOGGER.debug("Writing hub config to file: %s", self._config_path)
        pyharmony.ha_write_config_file(self._config, self._config_path)
| ewandor/home-assistant | homeassistant/components/remote/harmony.py | Python | apache-2.0 | 7,993 |
# -*- coding: utf-8 -*-
#
# test_mpitests.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import nest
import os
from subprocess import call
HAVE_MPI = nest.ll_api.sli_func("statusdict/have_mpi ::")
class TestMPIDependentTests(unittest.TestCase):
    """Run each MPI-dependent test script via mpirun and collect failures."""

    @unittest.skipIf(not HAVE_MPI, 'NEST was compiled without MPI')
    def testsWithMPI(self):
        """Execute every MPI test script with its required process count."""
        # The skipIf decorator above already guarantees HAVE_MPI is true
        # whenever this body runs, so the original redundant inner
        # `if HAVE_MPI:` guard was removed (behavior unchanged).
        failing_tests = []
        mpitests = [('mpitest_get_local_vps.py', 4),
                    ('test_sp/mpitest_issue_578_sp.py', 2)]
        path = os.path.dirname(__file__)
        for test, num_procs in mpitests:
            test = os.path.join(path, test)
            # Let SLI build the platform-specific mpirun invocation.
            command = nest.ll_api.sli_func("mpirun", num_procs, "nosetests", test)
            print("Executing test with command: " + command)
            command = command.split()
            my_env = os.environ.copy()
            returncode = call(command, env=my_env)
            if returncode != 0:  # call returns 0 for passing tests
                failing_tests.append((test, num_procs))
        self.assertTrue(not failing_tests, 'The following tests failed ' +
                        'when executing with "mpirun -np N nosetests ' +
                        '[script]": {}'.format(failing_tests))
def suite():
    """Build and return the unittest suite for this module."""
    # BUG FIX: the original loaded the undefined name
    # ``TestStructuralPlasticityMPI`` (a copy/paste leftover from another
    # test module), so calling suite() raised NameError. Load this module's
    # own test case instead.
    suite = unittest.TestLoader().loadTestsFromTestCase(
        TestMPIDependentTests)
    return suite
if __name__ == '__main__':
    # Allow running this file directly: execute the module's test suite
    # with verbose output.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
| stinebuu/nest-simulator | pynest/nest/tests/test_mpitests.py | Python | gpl-2.0 | 2,193 |
# -*- coding: utf-8 -*-
"""
Installs and configures Ceilometer
"""
import logging
import os
import uuid
from packstack.installer import utils
from packstack.installer import validators
from packstack.installer import processors
from packstack.modules.shortcuts import get_mq
from packstack.modules.ospluginutils import (getManifestTemplate,
appendManifestFile)
#--------------- Ceilometer installer plugin initialization ---------------
# (The previous banner said "oVirt" -- a copy-paste leftover; this plugin
# configures OpenStack Ceilometer.)
PLUGIN_NAME = "OS-Ceilometer"
# Plugin name rendered with ANSI colour for console status output.
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """Register the Ceilometer and MongoDB option groups with packstack.

    Called by the installer's plugin loader; every dict below describes
    one CLI option / answer-file key (validators, prompt, default, ...).
    """
    ceilometer_params = {
        "CEILOMETER": [
            {"CONF_NAME": "CONFIG_CEILOMETER_SECRET",
             "CMD_OPTION": "ceilometer-secret",
             "USAGE": "Secret key for signing metering messages",
             "PROMPT": "Enter the Ceilometer secret key",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             # Fresh random 16-hex-char secret generated on each run.
             "DEFAULT_VALUE": uuid.uuid4().hex[:16],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "USE_DEFAULT": True,
             "NEED_CONFIRM": True,
             "CONDITION": False},
            {"CONF_NAME": "CONFIG_CEILOMETER_KS_PW",
             "CMD_OPTION": "ceilometer-ks-passwd",
             "USAGE": ("The password to use for Ceilometer to authenticate "
                       "with Keystone"),
             "PROMPT": "Enter the password for the Ceilometer Keystone access",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_not_empty],
             # Placeholder replaced by process_password at processing time.
             "DEFAULT_VALUE": "PW_PLACEHOLDER",
             "PROCESSORS": [processors.process_password],
             "MASK_INPUT": True,
             "LOOSE_VALIDATION": False,
             "USE_DEFAULT": False,
             "NEED_CONFIRM": True,
             "CONDITION": False},
        ],
        "MONGODB": [
            {"CMD_OPTION": "mongodb-host",
             "USAGE": ("The IP address of the server on which to install "
                       "MongoDB"),
             "PROMPT": "Enter the IP address of the MongoDB server",
             "OPTION_LIST": [],
             "VALIDATORS": [validators.validate_ssh],
             "DEFAULT_VALUE": utils.get_localhost_ip(),
             "MASK_INPUT": False,
             "LOOSE_VALIDATION": True,
             "CONF_NAME": "CONFIG_MONGODB_HOST",
             "USE_DEFAULT": False,
             "NEED_CONFIRM": False,
             "CONDITION": False},
        ],
    }
    # Both groups are only asked about when Ceilometer install is enabled.
    ceilometer_groups = [
        {"GROUP_NAME": "CEILOMETER",
         "DESCRIPTION": "Ceilometer Config parameters",
         "PRE_CONDITION": "CONFIG_CEILOMETER_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
        {"GROUP_NAME": "MONGODB",
         "DESCRIPTION": "MONGODB Config parameters",
         "PRE_CONDITION": "CONFIG_CEILOMETER_INSTALL",
         "PRE_CONDITION_MATCH": "y",
         "POST_CONDITION": False,
         "POST_CONDITION_MATCH": True},
    ]
    for group in ceilometer_groups:
        paramList = ceilometer_params[group["GROUP_NAME"]]
        controller.addGroup(group, paramList)
def initSequences(controller):
    """Register the Ceilometer installation steps with the controller.

    No-op unless the user answered 'y' to CONFIG_CEILOMETER_INSTALL.
    MongoDB manifests are generated first since Ceilometer depends on them.
    """
    if controller.CONF['CONFIG_CEILOMETER_INSTALL'] != 'y':
        return
    step_defs = (
        ('Adding MongoDB manifest entries', create_mongodb_manifest),
        ('Adding Ceilometer manifest entries', create_manifest),
        ('Adding Ceilometer Keystone manifest entries',
         create_keystone_manifest),
    )
    steps = [{'title': title, 'functions': [func]}
             for title, func in step_defs]
    controller.addSequence("Installing OpenStack Ceilometer", [], [],
                           steps)
#-------------------------- step functions --------------------------
def create_manifest(config, messages):
    """Generate the Ceilometer puppet manifest for the controller host.

    Concatenates the MQ, ceilometer and firewall templates (the firewall
    config keys must be set *before* rendering firewall.pp) and appends
    the result to the controller's manifest file.
    """
    manifestfile = "%s_ceilometer.pp" % config['CONFIG_CONTROLLER_HOST']
    fragments = [
        getManifestTemplate(get_mq(config, "ceilometer")),
        getManifestTemplate("ceilometer.pp"),
    ]
    # Firewall parameters consumed by the firewall.pp template below.
    config['FIREWALL_ALLOWED'] = "'ALL'"
    config['FIREWALL_SERVICE_NAME'] = 'ceilometer-api'
    config['FIREWALL_SERVICE_ID'] = 'ceilometer_api'
    config['FIREWALL_PORTS'] = "'8777'"
    config['FIREWALL_CHAIN'] = "INPUT"
    config['FIREWALL_PROTOCOL'] = 'tcp'
    fragments.append(getManifestTemplate("firewall.pp"))
    # The ceilometer class needs a nova group; create one when nova itself
    # is not being installed.
    if config['CONFIG_NOVA_INSTALL'] == 'n':
        fragments.append(getManifestTemplate("ceilometer_nova_disabled.pp"))
    appendManifestFile(manifestfile, ''.join(fragments))
def create_mongodb_manifest(config, messages):
    """Generate the MongoDB puppet manifest for the MongoDB host.

    Appended with marker 'pre' so it is applied before the Ceilometer
    manifest that depends on the database being available.
    """
    manifestfile = "%s_mongodb.pp" % config['CONFIG_MONGODB_HOST']
    parts = [getManifestTemplate("mongodb.pp")]
    # Only the controller may reach mongod; template reads these keys.
    config['FIREWALL_ALLOWED'] = "'%s'" % config['CONFIG_CONTROLLER_HOST']
    config['FIREWALL_SERVICE_NAME'] = 'mongodb-server'
    config['FIREWALL_PORTS'] = "'27017'"
    config['FIREWALL_PROTOCOL'] = 'tcp'
    parts.append(getManifestTemplate("firewall.pp"))
    appendManifestFile(manifestfile, ''.join(parts), 'pre')
def create_keystone_manifest(config, messages):
    """Register the Ceilometer service/user with Keystone via puppet."""
    target = "%s_keystone.pp" % config['CONFIG_CONTROLLER_HOST']
    appendManifestFile(target, getManifestTemplate("keystone_ceilometer.pp"))
| fr34k8/packstack | packstack/plugins/ceilometer_800.py | Python | apache-2.0 | 5,396 |
"""Access control role."""
from balrog import exceptions
class Role(object):
    """A named collection of permissions an identity can be granted."""

    def __init__(self, name, permissions):
        """Create a role.

        :param name: Unique role name within one policy.
        :param permissions: Iterable of permission objects; each must
            expose a unique ``name`` attribute.
        """
        self.name = name
        self.permissions = {}
        for perm in permissions:
            # Duplicate permission names would silently shadow each other.
            assert perm.name not in self.permissions, (
                'The permission `{0}` is already registered within this role.'.format(perm.name)
            )
            self.permissions[perm.name] = perm

    def check(self, identity, permission, *args, **kwargs):
        """Check if the identity has the requested permission.

        :param identity: Currently authenticated identity.
        :param permission: Permission name.
        :return: True if this role holds the permission and its own
            check passes; False when the permission is unknown.
        """
        found = self.permissions.get(permission)
        if found is None:
            return False
        return found.check(identity, *args, **kwargs)

    def filter(self, identity, permission, objects, *args, **kwargs):
        """Filter objects according to the permission this identity has.

        :param identity: Currently authenticated identity.
        :param permission: Permission name.
        :param objects: Objects to filter out.
        :returns: Filtered objects.
        :raises: `PermissionNotFound` when this role has no such
            permission to filter with.
        """
        if permission not in self.permissions:
            raise exceptions.PermissionNotFound()
        return self.permissions[permission].filter(
            identity, objects, *args, **kwargs)
| paylogic/balrog | balrog/role.py | Python | mit | 1,849 |
from decimal import Decimal as D
from datetime import datetime
from django.db import models
from django.db.models.signals import post_delete, post_save
from south.modelsinspector import add_introspection_rules
from cc.ripple import PRECISION, SCALE
from cc.general.util import cache_on_object
OVERALL_BALANCE_SQL = """
select sum(balance) from (
select ac.balance * cl.bal_mult as balance
from account_creditline as cl
join account_account as ac on cl.account_id = ac.id
where cl.node_id = %s) as cl_balances
"""
TRUSTED_BALANCE_SQL = """
select sum(trusted_balance) from (
select (case when ac.balance * cl.bal_mult <= partner_cl.limit
then ac.balance * cl.bal_mult
else partner_cl.limit end) as trusted_balance
from account_creditline as cl
join account_account as ac on cl.account_id = ac.id
join account_creditline as partner_cl
on partner_cl.account_id = cl.account_id and
partner_cl.node_id != cl.node_id
where cl.node_id = %s) as trusted_balances
"""
class AmountField(models.DecimalField):
    """Decimal field pre-configured with the project-wide Ripple
    precision and scale, so every amount column is sized identically."""

    def __init__(self, *args, **kwargs):
        # Force the shared numeric size regardless of caller kwargs.
        kwargs.update(max_digits=PRECISION, decimal_places=SCALE)
        super(AmountField, self).__init__(*args, **kwargs)
# Enable south migrations for custom fields: South refuses to introspect
# field classes outside Django unless their module pattern is whitelisted.
add_introspection_rules([], ["^cc\.general"])
class Node(models.Model):
    """A participant in the Ripple credit graph; mirrors one profile."""
    alias = models.PositiveIntegerField(unique=True)  # Profile ID.

    def __unicode__(self):
        return u"Node %d" % self.alias

    def __repr__(self):
        return "Node(%d)" % self.alias

    def out_creditlines(self):
        """All credit lines owned by this node."""
        return self.creditlines.all()

    def _balance_query(self, sql):
        """Run a single-value aggregate query on the 'ripple' database,
        bound to this node's id; NULL (no credit lines) maps to zero."""
        from django.db import connections
        cur = connections['ripple'].cursor()
        cur.execute(sql, (self.id,))
        total = cur.fetchone()[0]
        return total or D('0')

    def overall_balance(self):
        """Net balance across every credit line of this node."""
        return self._balance_query(OVERALL_BALANCE_SQL)

    def trusted_balance(self):
        """
        Return sum of all negative balances and all positive balances within
        credit limits.
        """
        return self._balance_query(TRUSTED_BALANCE_SQL)
class AccountManager(models.Manager):
    """Manager with helpers for creating and fetching two-party accounts."""
    def create_account(self, node1, node2):
        """
        Create account between two nodes.
        Also creates the required CreditLine records.
        """
        acct = self.create()
        # node1 sees the stored balance as-is (+1); node2 sees it negated.
        pos_cl = CreditLine.objects.create(
            account=acct, node=node1, bal_mult=1)
        neg_cl = CreditLine.objects.create(
            account=acct, node=node2, bal_mult=-1)
        # Manually update new creditlines in cached graphs.
        # (CreditLine.post_save deliberately skips freshly-created rows
        # because the partner line may not exist yet at that point.)
        from cc.payment import flow
        flow.update_creditline_in_cached_graphs(pos_cl)
        flow.update_creditline_in_cached_graphs(neg_cl)
        return acct
    def get_account(self, node1, node2):
        "Gets account between node1 and node2."
        # TODO: Test this thoroughly.
        # Self-join: one account's two creditline rows must reference the
        # two given nodes.  At most one such account should exist.
        acct_list = list(self.raw(
            "select a.* from account_account a "
            "join account_creditline c1 on c1.account_id = a.id "
            "join account_creditline c2 on c2.account_id = a.id "
            "where c1.node_id = %s "
            "and c2.node_id = %s", (node1.id, node2.id)))
        if len(acct_list) == 0:
            return None
        elif len(acct_list) == 1:
            acct = acct_list[0]
        else:
            raise Account.MultipleObjectsReturned()
        return acct
    def get_or_create_account(self, node1, node2):
        # Convenience wrapper: fetch the shared account, creating it
        # (and its two creditlines) when absent.
        acct = self.get_account(node1, node2)
        if acct is None:
            acct = self.create_account(node1, node2)
        return acct
class Account(models.Model):
    """
    A mutual credit account that tracks IOUs between two nodes.
    This table stores the balance and other data shared between the nodes.
    CreditLine below stores symmetric data that each node has about
    the account. Each account has two CreditLines.
    """
    # Stored from the +1 creditline's perspective; see CreditLine.bal_mult.
    balance = AmountField(default=D('0'))
    is_active = models.BooleanField(default=True)
    created_on = models.DateTimeField(auto_now_add=True)
    objects = AccountManager()
    def __unicode__(self):
        return u"Account %s" % self.id
    @property
    def pos_creditline(self):
        # The creditline whose owner sees `balance` un-negated.
        return self.creditlines.get(bal_mult=1)
    @property
    def neg_creditline(self):
        return self.creditlines.get(bal_mult=-1)
    @property
    def pos_node(self):
        return self.pos_creditline.node
    @property
    def neg_node(self):
        return self.neg_creditline.node
class CreditLine(models.Model):
    """
    One node's data for and view on a mutual credit account.
    """
    account = models.ForeignKey(Account, related_name='creditlines')
    node = models.ForeignKey(Node, related_name='creditlines')
    # +1: this node sees account.balance as-is; -1: this node sees it negated.
    bal_mult = models.SmallIntegerField(
        choices=((1, '+1'), (-1, '-1')))
    # Max obligations node can emit to partner.
    limit = AmountField(default=D('0'), null=True, blank=True)
    def __unicode__(self):
        return u"%s's credit line for account %s" % (self.node, self.account_id)
    @property
    def balance(self):
        "Node's balance."
        return self.account.balance * self.bal_mult
    @property
    @cache_on_object
    def partner_creditline(self):
        # The other creditline of the same account; cached per instance
        # by cache_on_object to avoid repeated queries.
        return CreditLine.objects.exclude(
            node__pk=self.node_id).get(account__pk=self.account_id)
    @property
    def partner(self):
        return self.partner_creditline.node
    @property
    def in_limit(self):
        "Max obligations node will accept from partner."
        return self.partner_creditline.limit
    @classmethod
    def post_save(cls, sender, instance, created, **kwargs):
        # Signal handler: keep cached flow graphs in sync on updates.
        if created:
            # Newly-created creditlines may not have a partner yet,
            # so updating them will blow up. Update new creditlines
            # manually.
            return
        from cc.payment import flow
        # TODO: Call from single external process -- not threadsafe!
        flow.update_creditline_in_cached_graphs(instance)
    @classmethod
    def post_delete(cls, sender, instance, **kwargs):
        # Signal handler: cascade-delete the shared account and refresh
        # the cached flow graph.
        # Delete partner creditline and account itself.
        try:
            instance.account.delete()
        except Account.DoesNotExist:
            pass
        # Remove from cached flow graph.
        from cc.payment import flow
        # TODO: Call from single external process -- not threadsafe!
        # XXX: This is broken - tries to load partner creditline, which
        # may already be (is?) deleted.
        flow.update_creditline_in_cached_graphs(instance)
# Wire the handlers above to Django's model signals; dispatch_uid prevents
# duplicate registration when this module is imported more than once.
post_save.connect(CreditLine.post_save, CreditLine,
                  dispatch_uid='account.models')
post_delete.connect(CreditLine.post_delete, CreditLine,
                    dispatch_uid='account.models')
| rfugger/villagescc | cc/account/models.py | Python | agpl-3.0 | 6,949 |
from django import forms
from django_measurement.forms import MeasurementField
from tests.custom_measure_base import DegreePerTime, Temperature, Time
from tests.models import MeasurementTestModel
class MeasurementTestForm(forms.ModelForm):
    """ModelForm exposing every field of MeasurementTestModel."""
    class Meta:
        model = MeasurementTestModel
        exclude = []
class LabelTestForm(forms.Form):
    """Plain form with a single Temperature measurement field."""
    simple = MeasurementField(Temperature)
class SITestForm(forms.Form):
    """Plain form exercising an SI-unit (Time) measurement field."""
    simple = MeasurementField(Time)
class BiDimensionalLabelTestForm(forms.Form):
    """Plain form exercising a bi-dimensional (DegreePerTime) field."""
    simple = MeasurementField(DegreePerTime)
| coddingtonbear/django-measurement | tests/forms.py | Python | mit | 556 |
"""
sphinx.builders.latex
~~~~~~~~~~~~~~~~~~~~~
LaTeX builder.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import warnings
from os import path
from typing import Any, Dict, Iterable, List, Tuple, Union
from docutils.frontend import OptionParser
from docutils.nodes import Node
import sphinx.builders.latex.nodes # NOQA # Workaround: import this before writer to avoid ImportError
from sphinx import addnodes, highlighting, package_dir
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.builders.latex.constants import ADDITIONAL_SETTINGS, DEFAULT_SETTINGS, SHORTHANDOFF
from sphinx.builders.latex.theming import Theme, ThemeFactory
from sphinx.builders.latex.util import ExtBabel
from sphinx.config import ENUM, Config
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.errors import NoUri, SphinxError
from sphinx.locale import _, __
from sphinx.util import logging, progress_message, status_iterator, texescape
from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.util.docutils import SphinxFileOutput, new_document
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import format_date
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, make_filename_from_project
from sphinx.util.template import LaTeXRenderer
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter
# load docutils.nodes after loading sphinx.builders.latex.nodes
from docutils import nodes # isort:skip
# Maps a two-letter language code to the xindy command-line options that
# select the matching sort module and input encoding for index generation.
XINDY_LANG_OPTIONS = {
    # language codes from docutils.writers.latex2e.Babel
    # ! xindy language names may differ from those in use by LaTeX/babel
    # ! xindy does not support all Latin scripts as recognized by LaTeX/babel
    # ! not all xindy-supported languages appear in Babel.language_codes
    # cd /usr/local/texlive/2018/texmf-dist/xindy/modules/lang
    # find . -name '*utf8.xdy'
    # LATIN
    'sq': '-L albanian -C utf8 ',
    'hr': '-L croatian -C utf8 ',
    'cs': '-L czech -C utf8 ',
    'da': '-L danish -C utf8 ',
    'nl': '-L dutch-ij-as-ij -C utf8 ',
    'en': '-L english -C utf8 ',
    'eo': '-L esperanto -C utf8 ',
    'et': '-L estonian -C utf8 ',
    'fi': '-L finnish -C utf8 ',
    'fr': '-L french -C utf8 ',
    'de': '-L german-din5007 -C utf8 ',
    'is': '-L icelandic -C utf8 ',
    'it': '-L italian -C utf8 ',
    'la': '-L latin -C utf8 ',
    'lv': '-L latvian -C utf8 ',
    'lt': '-L lithuanian -C utf8 ',
    'dsb': '-L lower-sorbian -C utf8 ',
    'ds': '-L lower-sorbian -C utf8 ',   # trick, no conflict
    'nb': '-L norwegian -C utf8 ',
    'no': '-L norwegian -C utf8 ',       # and what about nynorsk?
    'pl': '-L polish -C utf8 ',
    'pt': '-L portuguese -C utf8 ',
    'ro': '-L romanian -C utf8 ',
    'sk': '-L slovak-small -C utf8 ',    # there is also slovak-large
    'sl': '-L slovenian -C utf8 ',
    'es': '-L spanish-modern -C utf8 ',  # there is also spanish-traditional
    'sv': '-L swedish -C utf8 ',
    'tr': '-L turkish -C utf8 ',
    'hsb': '-L upper-sorbian -C utf8 ',
    'hs': '-L upper-sorbian -C utf8 ',   # trick, no conflict
    'vi': '-L vietnamese -C utf8 ',
    # CYRILLIC
    # for usage with pdflatex, needs also cyrLICRutf8.xdy module
    'be': '-L belarusian -C utf8 ',
    'bg': '-L bulgarian -C utf8 ',
    'mk': '-L macedonian -C utf8 ',
    'mn': '-L mongolian-cyrillic -C utf8 ',
    'ru': '-L russian -C utf8 ',
    'sr': '-L serbian -C utf8 ',
    'sh-cyrl': '-L serbian -C utf8 ',
    'sh': '-L serbian -C utf8 ',  # trick, no conflict
    'uk': '-L ukrainian -C utf8 ',
    # GREEK
    # can work only with xelatex/lualatex, not supported by texindy+pdflatex
    'el': '-L greek -C utf8 ',
    # FIXME, not compatible with [:2] slice but does Sphinx support Greek ?
    'el-polyton': '-L greek-polytonic -C utf8 ',
}
# Languages whose index sorting must take the Cyrillic xindy code path.
XINDY_CYRILLIC_SCRIPTS = [
    'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'sh', 'uk',
]
logger = logging.getLogger(__name__)
class LaTeXBuilder(Builder):
    """
    Builds LaTeX output to create PDF.
    """
    name = 'latex'
    format = 'latex'
    epilog = __('The LaTeX files are in %(outdir)s.')
    if os.name == 'posix':
        epilog += __("\nRun 'make' in that directory to run these through "
                     "(pdf)latex\n"
                     "(use `make latexpdf' here to do that automatically).")
    supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']
    supported_remote_images = False
    default_translator_class = LaTeXTranslator
    def init(self) -> None:
        """Initialize per-build state and the LaTeX rendering context."""
        self.babel: ExtBabel = None
        self.context: Dict[str, Any] = {}
        self.docnames: Iterable[str] = {}
        self.document_data: List[Tuple[str, str, str, str, str, bool]] = []
        self.themes = ThemeFactory(self.app)
        texescape.init()
        self.init_context()
        self.init_babel()
        self.init_multilingual()
    def get_outdated_docs(self) -> Union[str, List[str]]:
        """LaTeX output is always rebuilt in full; no incremental support."""
        return 'all documents'  # for now
    def get_target_uri(self, docname: str, typ: str = None) -> str:
        """Return the in-document reference target for *docname*."""
        if docname not in self.docnames:
            raise NoUri(docname, typ)
        else:
            return '%' + docname
    def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
        # ignore source path
        return self.get_target_uri(to, typ)
    def init_document_data(self) -> None:
        """Validate config latex_documents entries and collect titles."""
        preliminary_document_data = [list(x) for x in self.config.latex_documents]
        if not preliminary_document_data:
            logger.warning(__('no "latex_documents" config value found; no documents '
                              'will be written'))
            return
        # assign subdirs to titles
        self.titles: List[Tuple[str, str]] = []
        for entry in preliminary_document_data:
            docname = entry[0]
            if docname not in self.env.all_docs:
                logger.warning(__('"latex_documents" config value references unknown '
                                  'document %s'), docname)
                continue
            self.document_data.append(entry)  # type: ignore
            if docname.endswith(SEP + 'index'):
                docname = docname[:-5]
            self.titles.append((docname, entry[2]))
    def init_context(self) -> None:
        """Build the template context from defaults, engine, language
        and user-supplied latex_elements (later layers win)."""
        self.context = DEFAULT_SETTINGS.copy()
        # Add special settings for latex_engine
        self.context.update(ADDITIONAL_SETTINGS.get(self.config.latex_engine, {}))
        # Add special settings for (latex_engine, language_code)
        if self.config.language:
            key = (self.config.latex_engine, self.config.language[:2])
            self.context.update(ADDITIONAL_SETTINGS.get(key, {}))
        # Apply user settings to context
        self.context.update(self.config.latex_elements)
        self.context['release'] = self.config.release
        self.context['use_xindy'] = self.config.latex_use_xindy
        if self.config.today:
            self.context['date'] = self.config.today
        else:
            self.context['date'] = format_date(self.config.today_fmt or _('%b %d, %Y'),
                                               language=self.config.language)
        if self.config.latex_logo:
            self.context['logofilename'] = path.basename(self.config.latex_logo)
        # for compatibilities
        self.context['indexname'] = _('Index')
        if self.config.release:
            # Show the release label only if release value exists
            self.context.setdefault('releasename', _('Release'))
    def update_context(self) -> None:
        """Update template variables for .tex file just before writing."""
        # Apply extension settings to context
        registry = self.app.registry
        self.context['packages'] = registry.latex_packages
        self.context['packages_after_hyperref'] = registry.latex_packages_after_hyperref
    def init_babel(self) -> None:
        """Create the babel helper for the configured language."""
        self.babel = ExtBabel(self.config.language, not self.context['babel'])
        if self.config.language and not self.babel.is_supported_language():
            # emit warning if specified language is invalid
            # (only emitting, nothing changed to processing)
            logger.warning(__('no Babel option known for language %r'),
                           self.config.language)
    def init_multilingual(self) -> None:
        """Fill context keys that drive babel/polyglossia and font setup."""
        if self.context['latex_engine'] == 'pdflatex':
            if not self.babel.uses_cyrillic():
                if 'X2' in self.context['fontenc']:
                    self.context['substitutefont'] = '\\usepackage{substitutefont}'
                    self.context['textcyrillic'] = ('\\usepackage[Xtwo]'
                                                    '{sphinxpackagecyrillic}')
                elif 'T2A' in self.context['fontenc']:
                    self.context['substitutefont'] = '\\usepackage{substitutefont}'
                    self.context['textcyrillic'] = ('\\usepackage[TtwoA]'
                                                    '{sphinxpackagecyrillic}')
            if 'LGR' in self.context['fontenc']:
                self.context['substitutefont'] = '\\usepackage{substitutefont}'
            else:
                self.context['textgreek'] = ''
            if self.context['substitutefont'] == '':
                self.context['fontsubstitution'] = ''
        # 'babel' key is public and user setting must be obeyed
        if self.context['babel']:
            self.context['classoptions'] += ',' + self.babel.get_language()
            # this branch is not taken for xelatex/lualatex if default settings
            self.context['multilingual'] = self.context['babel']
            if self.config.language:
                self.context['shorthandoff'] = SHORTHANDOFF
                # Times fonts don't work with Cyrillic languages
                if self.babel.uses_cyrillic() and 'fontpkg' not in self.config.latex_elements:
                    self.context['fontpkg'] = ''
        elif self.context['polyglossia']:
            self.context['classoptions'] += ',' + self.babel.get_language()
            options = self.babel.get_mainlanguage_options()
            if options:
                language = r'\setmainlanguage[%s]{%s}' % (options, self.babel.get_language())
            else:
                language = r'\setmainlanguage{%s}' % self.babel.get_language()
            self.context['multilingual'] = '%s\n%s' % (self.context['polyglossia'], language)
    def write_stylesheet(self) -> None:
        """Emit sphinxhighlight.sty with the pygments highlighting rules."""
        highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style)
        stylesheet = path.join(self.outdir, 'sphinxhighlight.sty')
        with open(stylesheet, 'w') as f:
            f.write('\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n')
            f.write('\\ProvidesPackage{sphinxhighlight}'
                    '[2016/05/29 stylesheet for highlighting with pygments]\n')
            f.write('% Its contents depend on pygments_style configuration variable.\n\n')
            f.write(highlighter.get_stylesheet())
    def write(self, *ignored: Any) -> None:
        """Assemble and write one .tex file per latex_documents entry."""
        docwriter = LaTeXWriter(self)
        docsettings: Any = OptionParser(
            defaults=self.env.settings,
            components=(docwriter,),
            read_config_files=True).get_default_values()
        self.init_document_data()
        self.write_stylesheet()
        for entry in self.document_data:
            docname, targetname, title, author, themename = entry[:5]
            theme = self.themes.get(themename)
            toctree_only = False
            if len(entry) > 5:
                toctree_only = entry[5]
            destination = SphinxFileOutput(destination_path=path.join(self.outdir, targetname),
                                           encoding='utf-8', overwrite_if_changed=True)
            with progress_message(__("processing %s") % targetname):
                doctree = self.env.get_doctree(docname)
                # Document-wide tocdepth comes from the root toctree, if any.
                toctree = next(iter(doctree.traverse(addnodes.toctree)), None)
                if toctree and toctree.get('maxdepth') > 0:
                    tocdepth = toctree.get('maxdepth')
                else:
                    tocdepth = None
                doctree = self.assemble_doctree(
                    docname, toctree_only,
                    appendices=(self.config.latex_appendices if theme.name != 'howto' else []))
                doctree['docclass'] = theme.docclass
                doctree['contentsname'] = self.get_contentsname(docname)
                doctree['tocdepth'] = tocdepth
                self.post_process_images(doctree)
                self.update_doc_context(title, author, theme)
                self.update_context()
            with progress_message(__("writing")):
                docsettings._author = author
                docsettings._title = title
                docsettings._contentsname = doctree['contentsname']
                docsettings._docname = docname
                docsettings._docclass = theme.name
                doctree.settings = docsettings
                docwriter.theme = theme
                docwriter.write(doctree, destination)
    def get_contentsname(self, indexfile: str) -> str:
        """Return the caption of the root toctree, used as contents title."""
        tree = self.env.get_doctree(indexfile)
        contentsname = None
        for toctree in tree.traverse(addnodes.toctree):
            if 'caption' in toctree:
                contentsname = toctree['caption']
                break
        return contentsname
    def update_doc_context(self, title: str, author: str, theme: Theme) -> None:
        """Copy per-document metadata and theme settings into the context."""
        self.context['title'] = title
        self.context['author'] = author
        self.context['docclass'] = theme.docclass
        self.context['papersize'] = theme.papersize
        self.context['pointsize'] = theme.pointsize
        self.context['wrapperclass'] = theme.wrapperclass
    def assemble_doctree(self, indexfile: str, toctree_only: bool, appendices: List[str]) -> nodes.document:  # NOQA
        """Inline all toctrees of *indexfile* into one large doctree."""
        self.docnames = set([indexfile] + appendices)
        logger.info(darkgreen(indexfile) + " ", nonl=True)
        tree = self.env.get_doctree(indexfile)
        tree['docname'] = indexfile
        if toctree_only:
            # extract toctree nodes from the tree and put them in a
            # fresh document
            new_tree = new_document('<latex output>')
            new_sect = nodes.section()
            new_sect += nodes.title('<Set title in conf.py>',
                                    '<Set title in conf.py>')
            new_tree += new_sect
            for node in tree.traverse(addnodes.toctree):
                new_sect += node
            tree = new_tree
        largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
                                        darkgreen, [indexfile])
        largetree['docname'] = indexfile
        for docname in appendices:
            appendix = self.env.get_doctree(docname)
            appendix['docname'] = docname
            largetree.append(appendix)
        logger.info('')
        logger.info(__("resolving references..."))
        self.env.resolve_references(largetree, indexfile, self)
        # resolve :ref:s to distant tex files -- we can't add a cross-reference,
        # but append the document name
        for pendingnode in largetree.traverse(addnodes.pending_xref):
            docname = pendingnode['refdocname']
            sectname = pendingnode['refsectname']
            newnodes: List[Node] = [nodes.emphasis(sectname, sectname)]
            for subdir, title in self.titles:
                if docname.startswith(subdir):
                    newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
                    newnodes.append(nodes.emphasis(title, title))
                    newnodes.append(nodes.Text(')', ')'))
                    break
            else:
                pass
            pendingnode.replace_self(newnodes)
        return largetree
    def finish(self) -> None:
        """Copy images, message catalog and support files after writing."""
        self.copy_image_files()
        self.write_message_catalog()
        self.copy_support_files()
        if self.config.latex_additional_files:
            self.copy_latex_additional_files()
    @progress_message(__('copying TeX support files'))
    def copy_support_files(self) -> None:
        """copy TeX support files from texinputs."""
        # configure usage of xindy (impacts Makefile and latexmkrc)
        # FIXME: convert this rather to a confval with suitable default
        # according to language ? but would require extra documentation
        if self.config.language:
            xindy_lang_option = \
                XINDY_LANG_OPTIONS.get(self.config.language[:2],
                                       '-L general -C utf8 ')
            xindy_cyrillic = self.config.language[:2] in XINDY_CYRILLIC_SCRIPTS
        else:
            xindy_lang_option = '-L english -C utf8 '
            xindy_cyrillic = False
        context = {
            'latex_engine': self.config.latex_engine,
            'xindy_use': self.config.latex_use_xindy,
            'xindy_lang_option': xindy_lang_option,
            'xindy_cyrillic': xindy_cyrillic,
        }
        logger.info(bold(__('copying TeX support files...')))
        staticdirname = path.join(package_dir, 'texinputs')
        for filename in os.listdir(staticdirname):
            if not filename.startswith('.'):
                copy_asset_file(path.join(staticdirname, filename),
                                self.outdir, context=context)
        # use pre-1.6.x Makefile for make latexpdf on Windows
        if os.name == 'nt':
            staticdirname = path.join(package_dir, 'texinputs_win')
            copy_asset_file(path.join(staticdirname, 'Makefile_t'),
                            self.outdir, context=context)
    @progress_message(__('copying additional files'))
    def copy_latex_additional_files(self) -> None:
        """Copy user-listed latex_additional_files into the output dir."""
        for filename in self.config.latex_additional_files:
            logger.info(' ' + filename, nonl=True)
            copy_asset_file(path.join(self.confdir, filename), self.outdir)
    def copy_image_files(self) -> None:
        """Copy all referenced images (and the logo) into the output dir."""
        if self.images:
            stringify_func = ImageAdapter(self.app.env).get_original_image_uri
            for src in status_iterator(self.images, __('copying images... '), "brown",
                                       len(self.images), self.app.verbosity,
                                       stringify_func=stringify_func):
                dest = self.images[src]
                try:
                    copy_asset_file(path.join(self.srcdir, src),
                                    path.join(self.outdir, dest))
                except Exception as err:
                    logger.warning(__('cannot copy image file %r: %s'),
                                   path.join(self.srcdir, src), err)
        if self.config.latex_logo:
            if not path.isfile(path.join(self.confdir, self.config.latex_logo)):
                raise SphinxError(__('logo file %r does not exist') % self.config.latex_logo)
            else:
                copy_asset_file(path.join(self.confdir, self.config.latex_logo), self.outdir)
    def write_message_catalog(self) -> None:
        """Render sphinxmessages.sty with localized figure/table names."""
        formats = self.config.numfig_format
        context = {
            'addtocaptions': r'\@iden',
            'figurename': formats.get('figure', '').split('%s', 1),
            'tablename': formats.get('table', '').split('%s', 1),
            'literalblockname': formats.get('code-block', '').split('%s', 1)
        }
        if self.context['babel'] or self.context['polyglossia']:
            context['addtocaptions'] = r'\addto\captions%s' % self.babel.get_language()
        filename = path.join(package_dir, 'templates', 'latex', 'sphinxmessages.sty_t')
        copy_asset_file(filename, self.outdir, context=context, renderer=LaTeXRenderer())
    @property
    def usepackages(self) -> List[Tuple[str, str]]:
        # Deprecated accessor kept for backwards compatibility.
        warnings.warn('LaTeXBuilder.usepackages is deprecated.',
                      RemovedInSphinx50Warning, stacklevel=2)
        return self.app.registry.latex_packages
    @property
    def usepackages_after_hyperref(self) -> List[Tuple[str, str]]:
        # Deprecated accessor kept for backwards compatibility.
        warnings.warn('LaTeXBuilder.usepackages_after_hyperref is deprecated.',
                      RemovedInSphinx50Warning, stacklevel=2)
        return self.app.registry.latex_packages_after_hyperref
def validate_config_values(app: Sphinx, config: Config) -> None:
    """Drop unknown keys from ``latex_elements``, warning for each one."""
    unknown_keys = [key for key in config.latex_elements
                    if key not in DEFAULT_SETTINGS]
    for key in unknown_keys:
        msg = __("Unknown configure key: latex_elements[%r], ignored.")
        logger.warning(msg % (key,))
        config.latex_elements.pop(key)
def validate_latex_theme_options(app: Sphinx, config: Config) -> None:
    """Drop unknown keys from ``latex_theme_options``, warning for each."""
    unknown_keys = [key for key in config.latex_theme_options
                    if key not in Theme.UPDATABLE_KEYS]
    for key in unknown_keys:
        msg = __("Unknown theme option: latex_theme_options[%r], ignored.")
        logger.warning(msg % (key,))
        config.latex_theme_options.pop(key)
def install_packages_for_ja(app: Sphinx) -> None:
    """Add the pxjahyper package when building Japanese docs with
    platex/uplatex."""
    is_japanese = app.config.language == 'ja'
    uses_platex = app.config.latex_engine in ('platex', 'uplatex')
    if is_japanese and uses_platex:
        app.add_latex_package('pxjahyper', after_hyperref=True)
def default_latex_engine(config: Config) -> str:
    """Pick a sensible default LaTeX engine for the configured language."""
    lang = config.language or ''
    if lang == 'ja':
        return 'uplatex'
    if lang.startswith('zh') or lang == 'el':
        # CJK and Greek need a Unicode-capable engine.
        return 'xelatex'
    return 'pdflatex'
def default_latex_docclass(config: Config) -> Dict[str, str]:
    """Pick default LaTeX document classes; only Japanese needs overrides."""
    if config.language != 'ja':
        return {}
    if config.latex_engine == 'uplatex':
        return {'manual': 'ujbook', 'howto': 'ujreport'}
    return {'manual': 'jsbook', 'howto': 'jreport'}
def default_latex_use_xindy(config: Config) -> bool:
    """Enable xindy by default only for the Unicode engines."""
    return config.latex_engine in ('xelatex', 'lualatex')
def default_latex_documents(config: Config) -> List[Tuple[str, str, str, str, str]]:
    """Derive a single default latex_documents entry from project config."""
    engine = config.latex_engine
    project = texescape.escape_abbr(texescape.escape(config.project, engine))
    author = texescape.escape_abbr(texescape.escape(config.author, engine))
    filename = make_filename_from_project(config.project) + '.tex'
    return [(config.root_doc, filename, project, author, config.latex_theme)]
def setup(app: Sphinx) -> Dict[str, Any]:
    """Register the LaTeX builder, its config values and event handlers."""
    app.setup_extension('sphinx.builders.latex.transforms')
    app.add_builder(LaTeXBuilder)
    # priority=800: validate after third-party handlers have had a chance
    # to contribute their own latex_elements / theme options.
    app.connect('config-inited', validate_config_values, priority=800)
    app.connect('config-inited', validate_latex_theme_options, priority=800)
    app.connect('builder-inited', install_packages_for_ja)
    app.add_config_value('latex_engine', default_latex_engine, None,
                         ENUM('pdflatex', 'xelatex', 'lualatex', 'platex', 'uplatex'))
    app.add_config_value('latex_documents', default_latex_documents, None)
    app.add_config_value('latex_logo', None, None, [str])
    app.add_config_value('latex_appendices', [], None)
    app.add_config_value('latex_use_latex_multicolumn', False, None)
    app.add_config_value('latex_use_xindy', default_latex_use_xindy, None, [bool])
    app.add_config_value('latex_toplevel_sectioning', None, None,
                         ENUM(None, 'part', 'chapter', 'section'))
    app.add_config_value('latex_domain_indices', True, None, [list])
    app.add_config_value('latex_show_urls', 'no', None)
    app.add_config_value('latex_show_pagerefs', False, None)
    app.add_config_value('latex_elements', {}, None)
    app.add_config_value('latex_additional_files', [], None)
    app.add_config_value('latex_theme', 'manual', None, [str])
    app.add_config_value('latex_theme_options', {}, None)
    app.add_config_value('latex_theme_path', [], None, [list])
    app.add_config_value('latex_docclass', default_latex_docclass, None)
    return {
        'version': 'builtin',
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/sphinx/builders/latex/__init__.py | Python | apache-2.0 | 24,543 |
from holmium.core import (
Page, Element, Locators, Elements, ElementMap, Section, Sections
)
from holmium.core.cucumber import init_steps
# Generate holmium's cucumber step definitions at import time so they are
# available as soon as this module is loaded by the test runner.
init_steps()
class TestSection(Section):
    """Section page-object exposing an element, an element list and an
    element map, all located by the HTML ``name`` attribute."""
    el = Element(Locators.NAME, "el")
    els = Elements(Locators.NAME, "els")
    elmap = ElementMap(Locators.NAME, "elmap")
class TestSections(Sections):
    """Repeated-section page-object exposing a single named element."""
    el = Element(Locators.NAME, "el")
class TestPage(Page):
    """Page object exercising elements, collections, maps and nested sections
    for the cucumber step fixtures."""
    el = Element(Locators.NAME, "el")
    els = Elements(Locators.NAME, "els")
    elmap = ElementMap(Locators.NAME, "elmap")
    sections = TestSections(Locators.NAME, "sections")
    section = TestSection(Locators.NAME, "section")

    def do_stuff(self, a, b):
        """Combine the two arguments with ``+`` and return the result."""
        combined = a + b
        return combined

    def do_stuff_no_args(self):
        """Always report success."""
        return True

    def do_stuff_var_args(self, *args, **kwargs):
        """Echo back the positional and keyword arguments received."""
        captured = (args, kwargs)
        return captured
| alisaifee/holmium.core | tests/support/cucumber/steps.py | Python | mit | 823 |
import bisect
import json
import progress
import zoning
def calculate_stream_size(stream):
    """Return the total size in bytes of *stream* without disturbing its
    current read position.

    Works on any seekable file-like object.
    """
    old_pos = stream.tell()
    stream.seek(0, 2)  # whence=2: seek relative to end-of-stream
    size = stream.tell()  # BUG FIX: was ``f.tell()`` -- ``f`` is undefined here
    stream.seek(old_pos, 0)  # restore the caller's position
    return size
class NullFeatures(object):
    """Compact record of (map1, map2) index pairs whose intersection is empty.

    Each pair is flattened to a single integer id; contiguous runs are stored
    as sorted ``(start_id, end_id)`` tuples so membership tests cost O(log n).
    """

    def __init__(self, map1_len, map2_len):
        # Stride used to flatten an (n, i) pair into one integer id.
        self._mapping = map1_len * map2_len
        # Sentinel strictly larger than any encodable id, used as a bisect key.
        self._max_mapping = self._mapping * map1_len + 1
        self.regions = []

    def add_null_region(self, fromn, fromi, ton, toi):
        """Record that every pair from (fromn, fromi) through (ton, toi) is null."""
        start = fromn * self._mapping + fromi
        stop = ton * self._mapping + toi
        bisect.insort_right(self.regions, (start, stop))

    def is_null(self, n, i):
        """Return True when the pair (n, i) falls inside a recorded region."""
        flat_id = n * self._mapping + i
        idx = bisect.bisect_right(self.regions, (flat_id, self._max_mapping))
        if idx == 0:
            return False
        return self.regions[idx - 1][1] >= flat_id
def load_save_file(stream, logger = None):
    """Reconstruct saved intersection state written by StateSaver.

    Returns a dict keyed by (map1_index, map2_index) pairs, with two special
    entries: key ``None`` maps to a NullFeatures instance, and "LAST_INDEX"
    maps to the last (n, i) pair recorded in the journal.
    """
    if hasattr(stream, "name"):
        # A real file on disk: we can report progress against its total size.
        estimator = progress.TimeEstimator(logger, 0, calculate_stream_size(stream), precision = 1)
    else:
        estimator = None
    save = {}
    map1_len = None
    map2_len = None
    null_features = None
    last_index = None
    for line in stream:
        if estimator is not None:
            estimator.increment(len(line))
        data = json.loads(line)
        if map1_len is None:
            # The first line is the header recording both map sizes.
            map1_len = data["MAP1_LEN"]
            map2_len = data["MAP2_LEN"]
            save[None] = null_features = NullFeatures(map1_len, map2_len)
            continue;
        elif data[0] is None:
            # Compact region describing a run of consecutive null results.
            fromn, fromi = data[1]
            ton, toi = data[2]
            last_index = data[2]
            null_features.add_null_region(fromn, fromi, ton, toi)
            #for n in range(fromn, ton + 1):
            #    for i in range(fromi, map2_len):
            #        if n == ton and i == toi:
            #            break
            #        save[(n, i)] = None
        elif data[2] is None:
            # Single null result for one (n, i) pair.
            save[(data[0], data[1])] = None
            last_index = data[:2]
        else:
            # Non-null result: parse the recorded features back into objects.
            features = []
            for f in data[2:]:
                if f is None:
                    features.append(None)
                else:
                    features.append(zoning.parse_feature(f))
            save[(data[0], data[1])] = features
            last_index = data[:2]
    save["LAST_INDEX"] = last_index
    return save
class StateSaver(object):
    """Append-only journal of intersection progress, written as JSON lines.

    Consecutive null results are coalesced into a single
    ``[null,[n,i],[n,i]]`` region line.  The stream is flushed at most once
    per *flush_interval* recorded results, or immediately after a non-null
    result closes a run of nulls.
    """

    def __init__(self, save_state_to, flush_interval = 50000):
        self.stream = save_state_to
        self.flush_interval = flush_interval
        self.current_state_flush = 0
        self.last_state_flush = 0
        # (n, i) where the current run of null results began, or None.
        self.nulls_start = None

    def record_map_sizes(self, map1_len, map2_len):
        """Write the header line describing both map sizes."""
        if self.stream is None:
            return
        header = json.dumps({"MAP1_LEN" : map1_len, "MAP2_LEN" : map2_len})
        self.stream.write("%s\n" % header)

    def record(self, n, i, *args):
        """Record the outcome for pair (n, i); no *args* means a null result."""
        if self.stream is None:
            return
        self.current_state_flush += 1
        pending = self.current_state_flush - self.last_state_flush
        flush = pending >= self.flush_interval
        if not args:
            # Null result: just remember where the run of nulls started.
            if self.nulls_start is None:
                self.nulls_start = [n, i]
        else:
            if self.nulls_start:
                # A non-null result closes the open run of nulls; emit the
                # compact region line first.
                start_n, start_i = self.nulls_start
                region = "[null,[%d,%d],[%d,%d]]" % (start_n, start_i, n, i)
                self.stream.write("%s\n" % region)
                flush = True
                self.nulls_start = None
            serialized = [None if arg is None else arg.to_geo() for arg in args]
            self.stream.write("%s\n" % json.dumps([n, i] + serialized))
        if flush:
            self.last_state_flush = self.current_state_flush
            self.stream.flush()
def intersect(map1, map2, logger = None, previous_save = None, save_state_to = None, incremental_save_path = None, incremental_save_time = 600):
    """Intersect every feature of *map1* with *map2*, rezoning the overlaps.

    Optionally resumes from *previous_save* (the dict produced by
    load_save_file), journals progress through a StateSaver bound to
    *save_state_to*, and writes an incremental copy of map2 to
    *incremental_save_path* at most once per *incremental_save_time* seconds.
    Returns the modified (ModifiableMap) copy of map2.
    """
    if logger is None:
        logger = lambda m : None
    map1 = zoning.ModifiableMap(map1)
    map2 = zoning.ModifiableMap(map2)
    estimator = progress.TimeEstimator(logger, 0, len(map1) * len(map2), precision = 2, interval = 3.0)
    saver = StateSaver(save_state_to)
    last_incremental_save = 0
    if previous_save is not None:
        logger("\r%s\rFast-forwarding using saved state...\n" % (' ' * 40))
        last_n, last_i = previous_save["LAST_INDEX"]
        # While replaying, the progress bar only counts up to the saved position.
        estimator.end_value = (last_n - 1) * len(map2) + last_i
    else:
        saver.record_map_sizes(len(map1), len(map2))
    for n, f1 in enumerate(map1):
        if f1.geometry.is_empty:
            continue
        for i, f2 in enumerate(map2):
            # Replay phase: before reaching the saved position, apply the
            # recorded outcomes instead of recomputing intersections.
            if previous_save is not None and n <= last_n:
                if (n, i) in previous_save:
                    state = previous_save[(n,i)]
                    if state is None:
                        continue
                    map2[i] = state[0]
                    if state[1] is not None:
                        map2.append(state[1])
                    map1[n] = state[2]
                    estimator.increment()
                    if map1[n].geometry.is_empty:
                        estimator.increment(len(map2) - i)
                        break
                    continue
                elif previous_save[None].is_null(n, i):
                    estimator.increment()
                    continue
                elif n < last_n or (n == last_n and i <= last_i):
                    estimator.increment()
                    continue
                elif n == last_n and i == last_i:
                    # Reached the saved position: switch to full-range progress.
                    estimator.end_value = len(map1) * len(map2)
                    logger("\r%s\rDone.\n" % (' ' * 40))
            estimator.update(n * len(map2) + i)
            if f2.geometry.is_empty:
                saver.record(n, i)
                continue
            try:
                isect = f1.geometry.intersection(f2.geometry)
            except Exception as e:
                logger("\r%s\rError: %s\n" % (' ' * 40, e))
                estimator.force_next_refresh()
                continue
            if isect.is_empty:
                saver.record(n, i)
                continue
            area_delta = 10.0 # square meters
            new_feature = zoning.ZoningFeature("%s->%s" % (f1.objectid, f2.objectid), f2.zoning, isect, f2.old_zoning + f1.zoning)
            # new_state = [updated map2[i], appended remainder or None, updated map1[n]]
            new_state = [None, None, None]
            if new_feature.area() < area_delta:
                # The intersection is less than area_delta square meters, so it's probably just floating point error.
                # Skip it!
                saver.record(n, i)
                continue
            elif f2.area() - area_delta < new_feature.area():
                # the intersection is almost covering the entire preexisting area, so just assume that they're identical.
                new_feature = zoning.ZoningFeature("%s->%s" % (f1.objectid, f2.objectid), f2.zoning, f2.geometry, f2.old_zoning + f1.zoning)
            else:
                # add a new feature containing the portion of f2 that does not intersect with f1
                new_geom = f2.geometry.difference(new_feature.geometry)
                if not new_geom.is_empty:
                    map2.append(zoning.ZoningFeature("%s.2" % f2.objectid, f2.zoning, new_geom, f2.old_zoning))
                    estimator.end_value = len(map1) * len(map2)
                    new_state[1] = map2[-1]
            map2[i] = new_feature
            new_state[0] = map2[i]
            logger("\r%s\rPlot %s (%.02f acres) -> %s (%.02f acres) went from %s to %s\n" % (' ' * 40, f1.objectid, zoning.square_meters_to_acres(f1.area()), f2.objectid, zoning.square_meters_to_acres(new_feature.area()), f1.zoning, f2.zoning))
            estimator.force_next_refresh()
            # Delete the portion of overlap in f1 to hopefully speed up further comparisons:
            # (This is making the assumption that the zoning regions in map2 are non-overlapping)
            map1[n] = zoning.ZoningFeature(f1.objectid, f1.zoning, f1.geometry.difference(isect))
            new_state[2] = map1[n]
            saver.record(n, i, *new_state)
            if incremental_save_path and estimator.get_time() - last_incremental_save >= incremental_save_time:
                # do an incremental save once every incremental_save_time seconds
                logger("\r%s\rDoing an incremental save to %s..." % (' ' * 40, incremental_save_path))
                last_incremental_save = estimator.get_time()
                with open(incremental_save_path, 'w') as f:
                    map2.save(f)
            if map1[n].geometry.is_empty:
                break
    logger('\n')
    return map2
if __name__ == "__main__":
    import os
    import sys
    with open(sys.argv[1], 'r') as f1:
        with open(sys.argv[2], 'r') as f2:
            def logger(msg):
                # Progress/status messages go to stderr; stdout carries the result map.
                sys.stderr.write(msg)
                sys.stderr.flush()
            previous_save = None
            save_state_to = None
            incremental_path = None
            if len(sys.argv) >= 4:
                # The optional third argument names the save/journal file:
                # load it when it already exists, then keep appending to it.
                if os.path.exists(sys.argv[3]):
                    logger('Loading save state...\n')
                    with open(sys.argv[3], 'r') as f:
                        previous_save = load_save_file(f)
                    logger("\r%s\rLoaded.\n" % (' ' * 40))
                save_state_to = open(sys.argv[3], 'a')
                # BUG FIX: previously "%s.incremental" % sys.argv[3] was built
                # unconditionally, raising IndexError when no save file was given.
                incremental_path = "%s.incremental" % sys.argv[3]
            try:
                intersected = intersect(zoning.ZoningMap(f1), zoning.ZoningMap(f2), logger = logger, previous_save = previous_save, save_state_to = save_state_to, incremental_save_path = incremental_path)
            finally:
                if save_state_to is not None:
                    save_state_to.close()
            intersected.save(sys.stdout)
            # BUG FIX: only unlink the incremental file if it was ever written;
            # a fast run may finish before the first incremental save happens.
            if incremental_path is not None and os.path.exists(incremental_path):
                os.unlink(incremental_path)
            ## Sanity check:
            #import StringIO
            #output = StringIO.StringIO()
            #intersected.save(output)
            #output.seek(0)
            #list(zoning.ZoningMap(output))
            #output.close()
| ESultanik/ZoningMaps | intersect_maps.py | Python | gpl-3.0 | 10,096 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2015-11-13 22:02:21
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2015-11-13 22:02:57
from django.conf import settings
from django.template.loader import render_to_string
def analytics(request):
    """Django context processor exposing the rendered analytics snippet.

    Makes ``analytics_code`` available to every template rendered through a
    RequestContext.
    """
    template_context = { 'google_analytics_key': settings.GOOGLE_ANALYTICS_KEY }
    snippet = render_to_string("analytics/analytics.html", template_context)
    return { 'analytics_code': snippet }
# Safe Eyes is a utility to remind you to take break frequently
# to protect your eyes from eye strain.
# Copyright (C) 2017 Gobinath
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import gi
from safeeyes.model import BreakType
gi.require_version('Notify', '0.7')
from gi.repository import Notify
"""
Safe Eyes Notification plugin
"""
APPINDICATOR_ID = 'safeeyes'
# Currently displayed Notify.Notification instance, if any.
notification = None
# Plugin context object supplied by Safe Eyes core in init().
context = None
# Seconds of warning before a break; overwritten from config in init().
warning_time = 10
# Initialize libnotify once at import time.
Notify.init(APPINDICATOR_ID)
def init(ctx, safeeyes_config, plugin_config):
    """Store the plugin context and the configured pre-break warning time."""
    global context, warning_time
    logging.debug('Initialize Notification plugin')
    warning_time = safeeyes_config.get('pre_break_warning_time')
    context = ctx
def on_pre_break(break_obj):
    """Display a desktop notification announcing the upcoming break."""
    global notification
    logging.info('Show the notification')
    # Pick the message matching the type of the next break.
    if break_obj.type == BreakType.SHORT_BREAK:
        body = _('Ready for a short break in %s seconds') % warning_time
    else:
        body = _('Ready for a long break in %s seconds') % warning_time
    message = '\n' + body
    notification = Notify.Notification.new('Safe Eyes', message, icon='safeeyes_enabled')
    try:
        notification.show()
    except BaseException:
        logging.error('Failed to show the notification')
def on_start_break(break_obj):
    """Dismiss the pre-break notification once the break actually starts."""
    global notification
    logging.info('Close pre-break notification')
    if not notification:
        return
    try:
        notification.close()
        notification = None
    except BaseException:
        # Some operating systems close the notification on their own;
        # ignore any failure here.
        pass
def on_exit():
    """
    Uninitialize the registered notification support (libnotify).
    """
    logging.debug('Stop Notification plugin')
    Notify.uninit()
| bayuah/SafeEyes | safeeyes/plugins/notification/plugin.py | Python | gpl-3.0 | 2,488 |
import io
from struct import Struct
from unittest import TestCase
from mcflint import nbt
def create_parser(data):
    """Build an NBTParser reading from an in-memory copy of *data*."""
    buffer = io.BytesIO(data)
    return nbt.NBTParser(buffer)
class TestParser(TestCase):
    """Unit tests for nbt.NBTParser: the raw read_* primitives, tag-name
    lookup, and the per-tag parse_* methods, including error handling on
    truncated or malformed buffers."""
    def test_readers(self):
        parser = create_parser(b'')
        # Every read_* method must be registered in the readers table.
        self.assertEqual(len(parser.readers),
                         len([reader for reader in dir(parser) if reader.startswith('read_')]))
        self.assertIn('string', parser.readers.keys())
        self.assertNotIn('list', parser.readers.keys())
    def test_read(self):
        self.assertEqual(create_parser(b'\x00\x01\x41').read('string'), 'A')
        with self.assertRaises(KeyError):
            create_parser(b'').read('foo')
    def test_read_past_end(self):
        # Reading past the end of the buffer must raise, not return garbage.
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x00\x10spam').read_string()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'').read_double()
    def test_read_number(self):
        self.assertEqual(create_parser(b'\x01\x00')._read_number('short'), 256)
        with self.assertRaises(KeyError):
            create_parser(b'')._read_number('bar')
    def test_read_bytes(self):
        fmt = Struct('>b')
        self.assertEqual(create_parser(fmt.pack(2)).read_char(), 2)
        self.assertEqual(create_parser(fmt.pack(-16)).read_char(), -16)
    def test_read_shorts(self):
        fmt = Struct('>h')
        self.assertEqual(create_parser(fmt.pack(1256)).read_short(), 1256)
        self.assertEqual(create_parser(fmt.pack(-236)).read_short(), -236)
    def test_read_ints(self):
        fmt = Struct('>i')
        self.assertEqual(create_parser(fmt.pack(-98887236)).read_int(), -98887236)
        self.assertEqual(create_parser(fmt.pack(2369)).read_int(), 2369)
    def test_read_longs(self):
        fmt = Struct('>q')
        self.assertEqual(create_parser(fmt.pack(11529215046068469)).read_long(), 11529215046068469)
        self.assertEqual(create_parser(fmt.pack(-14)).read_long(), -14)
    def test_read_floats(self):
        fmt = Struct('>f')
        # Single-precision floats lose digits, so compare approximately.
        self.assertAlmostEqual(create_parser(fmt.pack(-965.50123)).read_float(), -965.50123, places=4)
        self.assertAlmostEqual(create_parser(fmt.pack(165.236)).read_float(), 165.236, places=4)
    def test_read_doubles(self):
        fmt = Struct('>d')
        self.assertEqual(create_parser(fmt.pack(1.2567890123456789)).read_double(), 1.2567890123456789)
        self.assertEqual(create_parser(fmt.pack(-1.90123456789)).read_double(), -1.90123456789)
    def test_read_string(self):
        parser = create_parser(b'\x00\x05hello\x00\x05world')
        self.assertEqual(parser.read_string(), 'hello')
        self.assertEqual(parser.read_string(), 'world')
        with self.assertRaises(nbt.ParsingError):
            create_parser(b'\x00\x01\x89').read_string()
    def test_read_tag_name(self):
        fmt = Struct('>b')
        self.assertEqual(create_parser(fmt.pack(10)).read_tag_name(), 'compound')
        self.assertEqual(create_parser(fmt.pack(0)).read_tag_name(), 'end')
        self.assertEqual(create_parser(fmt.pack(3)).read_tag_name(), 'int')
        with self.assertRaises(nbt.ParsingError):
            create_parser(fmt.pack(24)).read_tag_name()
    def test_parsers(self):
        parser = create_parser(b'')
        # Every parse_* method must be registered in the parsers table.
        self.assertEqual(len(parser.parsers),
                         len([tag_parser for tag_parser in dir(parser) if tag_parser.startswith('parse_')]))
        self.assertIn('int array', parser.parsers.keys())
        self.assertIn('byte', parser.parsers.keys())
        self.assertNotIn('egg', parser.parsers.keys())
    def test_parse(self):
        result = create_parser(b'\x00\x01\x41').parse('string')
        self.assertIsInstance(result, nbt.TAG_String)
        self.assertEqual(result, 'A')
        with self.assertRaises(KeyError):
            create_parser(b'').parse('bar')
    def test_parse_past_end(self):
        # Each container/scalar parser must detect a truncated buffer.
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x00\x10spam').parse_string_tag()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'').parse_compound_tag()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x02\x00\x00\x01').parse_list_tag()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x00\x00\x00\x02').parse_int_array_tag()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x00\x00\x10\x02\x03\x05').parse_byte_array_tag()
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'').parse_double_tag()
    def test_parse_byte_tag(self):
        fmt = Struct('>b')
        tag = create_parser(fmt.pack(64)).parse_byte_tag()
        self.assertEqual(tag, 64)
        self.assertIsInstance(tag, nbt.TAG_Byte)
    def test_parse_short_tag(self):
        fmt = Struct('>h')
        tag = create_parser(fmt.pack(1234)).parse_short_tag()
        self.assertEqual(tag, 1234)
        self.assertIsInstance(tag, nbt.TAG_Short)
    def test_parse_int_tag(self):
        fmt = Struct('>i')
        tag = create_parser(fmt.pack(-635987)).parse_int_tag()
        self.assertEqual(tag, -635987)
        self.assertIsInstance(tag, nbt.TAG_Int)
    def test_parse_long_tag(self):
        fmt = Struct('>q')
        tag = create_parser(fmt.pack(25698665412668)).parse_long_tag()
        self.assertEqual(tag, 25698665412668)
        self.assertIsInstance(tag, nbt.TAG_Long)
    def test_parse_float_tag(self):
        fmt = Struct('>f')
        tag = create_parser(fmt.pack(32.5236)).parse_float_tag()
        self.assertAlmostEqual(tag, 32.5236, places=4)
        self.assertIsInstance(tag, nbt.TAG_Float)
    def test_parse_double_tag(self):
        fmt = Struct('>d')
        tag = create_parser(fmt.pack(256986654126.68)).parse_double_tag()
        self.assertEqual(tag, 256986654126.68)
        self.assertIsInstance(tag, nbt.TAG_Double)
    def test_parse_byte_array_tag(self):
        tag = create_parser(b'\x00\x00\x00\x02\x10\xf2').parse_byte_array_tag()
        self.assertEqual(tag[0], 16)
        self.assertEqual(tag[1], -14)
        self.assertEqual(len(tag), 2)
        with self.assertRaises(IndexError):
            _ = tag[2]
    def test_parse_string_tag(self):
        tag = create_parser(b'\x00\x05Hello').parse_string_tag()
        self.assertEqual(tag, 'Hello')
        self.assertEqual(len(tag), 5)
        self.assertIsInstance(tag, nbt.TAG_String)
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x10\x00some string').read_string()
    def test_parse_list_tag(self):
        tag = create_parser(b'\x08\x00\x00\x00\x01\x00\x03abc').parse_list_tag()
        self.assertEqual(tag, ['abc'])
        self.assertEqual(len(tag), 1)
        self.assertIsInstance(tag, nbt.TAG_List)
        self.assertIsInstance(tag[0], nbt.TAG_String)
        with self.assertRaises(IndexError):
            _ = tag[1]
    def test_parse_compound_tag(self):
        self.assertEqual(len(create_parser(b'\x00').parse_compound_tag()), 0)
        tag = create_parser(b'\x01\x00\x03key\xf5\x00').parse_compound_tag()
        self.assertEqual(tag, {'key': -11})
        self.assertIsInstance(tag, nbt.TAG_Compound)
        self.assertEqual(len(tag), 1)
        self.assertEqual(tag['key'], -11)
        self.assertIsInstance(tag['key'], nbt.TAG_Byte)
        with self.assertRaises(KeyError):
            _ = tag['foo']
        with self.assertRaises(nbt.UnexpectedEndOfBuffer):
            create_parser(b'\x01\x00\x00\xff').parse_compound_tag()
    def test_parse_int_array_tag(self):
        tag = create_parser(b'\x00\x00\x00\x01\xff\xff\xff\xf0').parse_int_array_tag()
        self.assertEqual(tag, [-16])
        self.assertIsInstance(tag, nbt.TAG_Int_Array)
        self.assertIsInstance(tag[0], int)
        self.assertEqual(len(tag), 1)
        with self.assertRaises(IndexError):
            _ = tag[2]
class TestEncoder(TestCase):
    """Unit tests for nbt.NBTEncoder: scalar encoders, container encoders,
    and range/type validation errors."""
    def test_encode(self):
        encoder = nbt.NBTEncoder('string')
        self.assertEqual(encoder.encode(nbt.TAG_String('A')), b'\x00\x01A')
        with self.assertRaises(AttributeError):
            nbt.NBTEncoder('bar')
    def test_encode_numeric(self):
        self.assertEqual(nbt.NBTEncoder._encode_numeric(2, 'char'), b'\x02')
        with self.assertRaises(KeyError):
            nbt.NBTEncoder._encode_numeric(5, 'egg')
    def test_encode_byte(self):
        self.assertEqual(nbt.NBTEncoder.encode_byte(-6), b'\xfa')
        self.assertEqual(nbt.NBTEncoder.encode_byte(6), b'\x06')
        # Out-of-range values must be rejected, not silently truncated.
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_byte(569)
    def test_encode_short(self):
        self.assertEqual(nbt.NBTEncoder.encode_short(-32), b'\xff\xe0')
        self.assertEqual(nbt.NBTEncoder.encode_short(32), b'\x00\x20')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_short(-236569)
    def test_encode_int(self):
        self.assertEqual(nbt.NBTEncoder.encode_int(257), b'\x00\x00\x01\x01')
        self.assertEqual(nbt.NBTEncoder.encode_int(-257), b'\xff\xff\xfe\xff')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_int(-966995899484849494984)
    def test_encode_long(self):
        self.assertEqual(nbt.NBTEncoder.encode_long(-2), b'\xff\xff\xff\xff\xff\xff\xff\xfe')
        self.assertEqual(nbt.NBTEncoder.encode_long(2), b'\x00\x00\x00\x00\x00\x00\x00\x02')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_long(9494448289879979494498494947)
    def test_encode_float(self):
        self.assertEqual(nbt.NBTEncoder.encode_float(-64.25), b'\xc2\x80\x80\x00')
        self.assertEqual(nbt.NBTEncoder.encode_float(64.25), b'\x42\x80\x80\x00')
    def test_encode_double(self):
        self.assertEqual(nbt.NBTEncoder.encode_double(-64.25), b'\xc0P\x10\x00\x00\x00\x00\x00')
        self.assertEqual(nbt.NBTEncoder.encode_double(64.25), b'@P\x10\x00\x00\x00\x00\x00')
    def test_encode_byte_array(self):
        self.assertEqual(nbt.NBTEncoder.encode_byte_array([1, 2]),
                         b'\x00\x00\x00\x02\x01\x02')
        self.assertEqual(nbt.NBTEncoder.encode_byte_array([-9, 127, 0]),
                         b'\x00\x00\x00\x03\xf7\x7f\x00')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_byte_array([-300])
    def test_encode_string(self):
        self.assertEqual(nbt.NBTEncoder.encode_string('hello'), b'\x00\x05hello')
        self.assertEqual(nbt.NBTEncoder.encode_string(''), b'\x00\x00')
    def test_encode_list(self):
        self.assertEqual(nbt.NBTEncoder.encode_list(nbt.TAG_List('short', [nbt.TAG_Short(1), nbt.TAG_Short(1)])),
                         b'\x02\x00\x00\x00\x02\x00\x01\x00\x01')
        # List members must be tag instances of the declared element type.
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_list(nbt.TAG_List('int', [1, 2]))
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_list(nbt.TAG_List('float', [nbt.TAG_Byte(5)]))
    def test_encode_compound(self):
        self.assertEqual(nbt.NBTEncoder.encode_compound(nbt.TAG_Compound({'test': nbt.TAG_Byte_Array([1, 2])})),
                         b'\x07\x00\x04test\x00\x00\x00\x02\x01\x02\x00')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_compound({'hey': 5})
    def test_encode_int_array(self):
        self.assertEqual(nbt.NBTEncoder.encode_int_array(nbt.TAG_Int_Array([-1, 1])),
                         b'\x00\x00\x00\x02\xff\xff\xff\xff\x00\x00\x00\x01')
        with self.assertRaises(nbt.EncodingError):
            nbt.NBTEncoder.encode_int_array([5669999998865, 663333333333333])
class TestTags(TestCase):
    """Unit tests for the TAG_* value classes: default values, the optional
    ``name`` attribute, byte serialization, and encoding-range errors."""
    def test_byte_tag(self):
        self.assertEqual(nbt.TAG_Byte(), 0)
        self.assertEqual(nbt.TAG_Byte(25), 25)
        self.assertEqual(nbt.TAG_Byte(-36), -36)
        self.assertEqual(nbt.TAG_Byte(-36, name='byte').name, 'byte')
        self.assertIs(nbt.TAG_Byte().name, None)
        self.assertEqual(nbt.TAG_Byte(32).get_bytes(), b'\x20')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Byte(896).get_bytes()
    def test_short_tag(self):
        self.assertEqual(nbt.TAG_Short(), 0)
        self.assertEqual(nbt.TAG_Short(5639), 5639)
        self.assertEqual(nbt.TAG_Short(-883), -883)
        self.assertEqual(nbt.TAG_Short(name='short').name, 'short')
        self.assertIs(nbt.TAG_Short().name, None)
        self.assertEqual(nbt.TAG_Short(256).get_bytes(), b'\x01\x00')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Short(88896).get_bytes()
    def test_int_tag(self):
        self.assertEqual(nbt.TAG_Int(), 0)
        self.assertEqual(nbt.TAG_Int(6999885), 6999885)
        self.assertEqual(nbt.TAG_Int(-25556), -25556)
        self.assertEqual(nbt.TAG_Int(23, name='int').name, 'int')
        self.assertIs(nbt.TAG_Int().name, None)
        self.assertEqual(nbt.TAG_Int(2048).get_bytes(), b'\x00\x00\x08\x00')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Int(8956669996).get_bytes()
    def test_long_tag(self):
        self.assertEqual(nbt.TAG_Long(), 0)
        self.assertEqual(nbt.TAG_Long(788956552333), 788956552333)
        self.assertEqual(nbt.TAG_Long(-123456898), -123456898)
        self.assertEqual(nbt.TAG_Long(-369882, name='long').name, 'long')
        self.assertIs(nbt.TAG_Long().name, None)
        self.assertEqual(nbt.TAG_Long(-1).get_bytes(), b'\xff\xff\xff\xff\xff\xff\xff\xff')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Long(895666569999998779996).get_bytes()
    def test_float_tag(self):
        self.assertEqual(nbt.TAG_Float(), 0)
        self.assertEqual(nbt.TAG_Float(5896.8), 5896.8)
        self.assertEqual(nbt.TAG_Float(-2003.56), -2003.56)
        self.assertEqual(nbt.TAG_Float(78.66339, name='float').name, 'float')
        self.assertIs(nbt.TAG_Float().name, None)
        self.assertEqual(nbt.TAG_Float(8.369).get_bytes(), b'A\x05\xe7m')
    def test_double_tag(self):
        self.assertEqual(nbt.TAG_Double(), 0)
        self.assertEqual(nbt.TAG_Double(88996660.8888), 88996660.8888)
        self.assertEqual(nbt.TAG_Double(-89996572.5585), -89996572.5585)
        self.assertEqual(nbt.TAG_Double(88.999, name='double').name, 'double')
        self.assertIs(nbt.TAG_Double().name, None)
        self.assertEqual(nbt.TAG_Double(78.5555599).get_bytes(), b'@S\xa3\x8eK\x1c^\x05')
    def test_byte_array_tag(self):
        self.assertEqual(nbt.TAG_Byte_Array(), [])
        self.assertEqual(nbt.TAG_Byte_Array([1, 2, 3]), [1, 2, 3])
        self.assertEqual(nbt.TAG_Byte_Array([-63, 98, -8]), [-63, 98, -8])
        self.assertEqual(nbt.TAG_Byte_Array(name='byte array').name, 'byte array')
        self.assertIs(nbt.TAG_Byte_Array().name, None)
        self.assertEqual(nbt.TAG_Byte_Array([1, -1]).get_bytes(), b'\x00\x00\x00\x02\x01\xff')
        # Out-of-range and non-integer members fail at serialization time.
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Byte_Array([1, 2, 588]).get_bytes()
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Byte_Array([1.5]).get_bytes()
    def test_string_tag(self):
        self.assertEqual(nbt.TAG_String(), '')
        self.assertEqual(nbt.TAG_String('hello world'), 'hello world')
        self.assertEqual(nbt.TAG_String('foo', name='bar').name, 'bar')
        self.assertIs(nbt.TAG_String().name, None)
        self.assertEqual(nbt.TAG_String('spam').get_bytes(), b'\x00\x04spam')
        # Strings longer than the 16-bit length prefix allows must fail.
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_String('a' * 33000).get_bytes()
    def test_list_tag(self):
        with self.assertRaises(TypeError):
            nbt.TAG_List()
        self.assertEqual(nbt.TAG_List('int'), [])
        self.assertEqual(nbt.TAG_List('string', [nbt.TAG_String('hey')]), ['hey'])
        self.assertEqual(nbt.TAG_List('byte', [nbt.TAG_Byte(1), nbt.TAG_Byte(2)], name='egg').name, 'egg')
        self.assertIs(nbt.TAG_List('int array').name, None)
        self.assertEqual(nbt.TAG_List('byte', [nbt.TAG_Byte(1), nbt.TAG_Byte(2)]).get_bytes(),
                         b'\x01\x00\x00\x00\x02\x01\x02')
        with self.assertRaises(ValueError):
            nbt.TAG_List('foo')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_List('int', [1, 2]).get_bytes()
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_List('short', [nbt.TAG_String('spam')]).get_bytes()
    def test_compound_tag(self):
        self.assertEqual(nbt.TAG_Compound(), {})
        self.assertEqual(nbt.TAG_Compound({'foo': nbt.TAG_String('bar')}), {'foo': 'bar'})
        self.assertEqual(nbt.TAG_Compound(name='egg').name, 'egg')
        self.assertIs(nbt.TAG_Compound().name, None)
        self.assertEqual(nbt.TAG_Compound({'test': nbt.TAG_Short(-2)}).get_bytes(), b'\x02\x00\x04test\xff\xfe\x00')
        with self.assertRaises(ValueError):
            nbt.TAG_Compound({'foo': 42})
        # Members adopt their dictionary key as the tag name.
        self.assertEqual(nbt.TAG_Compound({'spam': nbt.TAG_String('egg')})['spam'].name, 'spam')
    def test_int_array_tag(self):
        self.assertEqual(nbt.TAG_Int_Array(), [])
        self.assertEqual(nbt.TAG_Int_Array([552, 32]), [552, 32])
        self.assertEqual(nbt.TAG_Int_Array([-6355589, 125555555]), [-6355589, 125555555])
        self.assertEqual(nbt.TAG_Int_Array(name='int array').name, 'int array')
        self.assertIs(nbt.TAG_Int_Array().name, None)
        self.assertEqual(nbt.TAG_Int_Array([1, -1]).get_bytes(), b'\x00\x00\x00\x02\x00\x00\x00\x01\xff\xff\xff\xff')
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Int_Array([1, 111155556692, 588]).get_bytes()
        with self.assertRaises(nbt.EncodingError):
            nbt.TAG_Int_Array([58.93, 53.1]).get_bytes()
| fizzy81/mcflint | tests/testnbt.py | Python | mit | 18,245 |
from ctypes import *
import os
import sys
import unittest
import test.support
from ctypes.util import find_library
libc_name = None
def setUpModule():
    """Locate the platform C library once for every test in this module."""
    global libc_name
    if os.name == "nt":
        libc_name = find_library("c")
    elif sys.platform == "cygwin":
        # Cygwin's C runtime has a fixed, well-known DLL name.
        libc_name = "cygwin1.dll"
    else:
        libc_name = find_library("c")
    if test.support.verbose:
        print("libc_name is", libc_name)
class LoaderTest(unittest.TestCase):
    """Tests for loading shared libraries through ctypes: CDLL/WinDLL,
    find_library, ordinal lookup, and the low-level _ctypes entry points."""
    # A library name guaranteed not to exist on any platform.
    unknowndll = "xxrandomnamexx"
    def test_load(self):
        if libc_name is None:
            self.skipTest('could not find libc')
        CDLL(libc_name)
        CDLL(os.path.basename(libc_name))
        self.assertRaises(OSError, CDLL, self.unknowndll)
    def test_load_version(self):
        if libc_name is None:
            self.skipTest('could not find libc')
        if os.path.basename(libc_name) != 'libc.so.6':
            self.skipTest('wrong libc path for test')
        cdll.LoadLibrary("libc.so.6")
        # linux uses version, libc 9 should not exist
        self.assertRaises(OSError, cdll.LoadLibrary, "libc.so.9")
        self.assertRaises(OSError, cdll.LoadLibrary, self.unknowndll)
    def test_find(self):
        for name in ("c", "m"):
            lib = find_library(name)
            if lib:
                cdll.LoadLibrary(lib)
                CDLL(lib)
    @unittest.skipUnless(os.name == "nt",
                         'test specific to Windows')
    def test_load_library(self):
        # CRT is no longer directly loadable. See issue23606 for the
        # discussion about alternative approaches.
        #self.assertIsNotNone(libc_name)
        if test.support.verbose:
            print(find_library("kernel32"))
            print(find_library("user32"))
        if os.name == "nt":
            # All four access styles must resolve the same export.
            windll.kernel32.GetModuleHandleW
            windll["kernel32"].GetModuleHandleW
            windll.LoadLibrary("kernel32").GetModuleHandleW
            WinDLL("kernel32").GetModuleHandleW
    @unittest.skipUnless(os.name == "nt",
                         'test specific to Windows')
    def test_load_ordinal_functions(self):
        import _ctypes_test
        dll = WinDLL(_ctypes_test.__file__)
        # We load the same function both via ordinal and name
        func_ord = dll[2]
        func_name = dll.GetString
        # addressof gets the address where the function pointer is stored
        a_ord = addressof(func_ord)
        a_name = addressof(func_name)
        f_ord_addr = c_void_p.from_address(a_ord).value
        f_name_addr = c_void_p.from_address(a_name).value
        self.assertEqual(hex(f_ord_addr), hex(f_name_addr))
        self.assertRaises(AttributeError, dll.__getitem__, 1234)
    @unittest.skipUnless(os.name == "nt", 'Windows-specific test')
    def test_1703286_A(self):
        from _ctypes import LoadLibrary, FreeLibrary
        # On winXP 64-bit, advapi32 loads at an address that does
        # NOT fit into a 32-bit integer. FreeLibrary must be able
        # to accept this address.
        # These are tests for http://www.python.org/sf/1703286
        handle = LoadLibrary("advapi32")
        FreeLibrary(handle)
    @unittest.skipUnless(os.name == "nt", 'Windows-specific test')
    def test_1703286_B(self):
        # Since on winXP 64-bit advapi32 loads like described
        # above, the (arbitrarily selected) CloseEventLog function
        # also has a high address. 'call_function' should accept
        # addresses so large.
        from _ctypes import call_function
        advapi32 = windll.advapi32
        # Calling CloseEventLog with a NULL argument should fail,
        # but the call should not segfault or so.
        self.assertEqual(0, advapi32.CloseEventLog(None))
        windll.kernel32.GetProcAddress.argtypes = c_void_p, c_char_p
        windll.kernel32.GetProcAddress.restype = c_void_p
        proc = windll.kernel32.GetProcAddress(advapi32._handle,
                                              b"CloseEventLog")
        self.assertTrue(proc)
        # This is the real test: call the function via 'call_function'
        self.assertEqual(0, call_function(proc, (None,)))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.6.0/Lib/ctypes/test/test_loading.py | Python | mit | 4,202 |
import sys
from optparse import OptionParser
# NOTE: Python 2 script (uses ``print`` statements).
# Command-line interface: an input taxa file with long GISAID-style names and
# an output file for the shortened publication-ready names.
parser = OptionParser()
parser.add_option("-i", "--InputTaxaFile",
    action="store", dest="Taxa_File", help="File containing taxa seen in the tree (Long names)")
parser.add_option("-o", "--OutputTaxaFile",
    action="store", dest="Output_Taxa_File", help="File to save new smaller publication taxa")
options, args = parser.parse_args()
infile = options.Taxa_File
outfile = options.Output_Taxa_File
taxafile = open(infile, "r").readlines()
linelist = list()
# strainlist1/strainlist2 track already-seen strain names so duplicates get a
# "_2", "_3", ... suffix appended.
strainlist1 = list()
strainlist2 = list()
for line in taxafile:
    x=1
    y=1
    oldline = line
    tabcount = line.count("\t")
    line = line.lstrip("\t")
    #print temp[:1]
    # Taxon lines start with "A" (influenza A strain) or a leading digit.
    if line[:1] == "A" or line[:1].isdigit():
        linesplit = line.split("  ")
        if linesplit[0][0] == "A":
            # Name fields are pipe-delimited: strain|...|subtype|id...|date.
            termlist = linesplit[0].split("|")
            if len(termlist) < 4:
                # Not enough fields to rewrite; keep the original line.
                linelist.append(oldline)
                continue
            strain = termlist[0]
            newstrain = strain
            while newstrain in strainlist1:
                x+=1
                newstrain = strain + "_" + str(x)
            strain = newstrain
            strainlist1.append(strain)
            date = termlist[-1]
            # GISAID ids (EPI...) in the 4th field imply subtype H7N9.
            if termlist[3][:3] == "EPI":
                subtype = "H7N9"
            else:
                subtype = termlist[2]
            print "strain = %s\tsubtype = %s\tdate = %s" % (strain, subtype, date)
            newline = "%s(%s)\n" % (strain, subtype)
        else:
            #print linesplit
            termlist = linesplit[1].split("|")
            if len(termlist) < 4:
                linelist.append(oldline)
                continue
            strain = termlist[0]
            newstrain = strain
            while newstrain in strainlist2:
                y+=1
                newstrain = strain + "_" + str(y)
            strain = newstrain
            strainlist2.append(strain)
            date = termlist[-1][:-1]
            if termlist[3][:3] == "EPI":
                subtype = "H7N9"
            else:
                subtype = termlist[2]
            print "strain = %s\tsubtype = %s\tdate = %s" % (strain, subtype, date)
            newline = "\t\t%s %s(%s),\n" % (linesplit[0], strain, subtype)
        linelist.append(newline)
    else:
        linelist.append(oldline)
newstring = ''
for line in linelist:
    newstring = newstring + line
outhandle = open(outfile, "w")
outhandle.write(newstring)
outhandle.close()
#for treefile in tree:
# terminallist = treefile.get_terminals()
#for term in terminallist:
# terminal_name = term.name
# #terminal_name = terminal_name.replace("'", "")
# #terminal_name = terminal_name.replace(".", "_")
# termlist = terminal_name.split("|")
# strain = termlist[0]
# date = termlist[-1]
# if "EPI" in terminal_name:
# subtype = "H7N9"
# else:
# subtype = termlist[2]
# print "strain = %s\tsubtype = %s\tdate = %s" % (strain, subtype, date)
##Phylo.write(tree, outfile, "nexus")
| belandbioinfo/GroundControl | Scripts/change_taxa_names.py | Python | gpl-2.0 | 3,065 |
import os
import sys

# Absolute path of the project root (three directory levels above this package).
MR_BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.." ))
MR_CMAPPER_PATH = os.path.join(MR_BASE_PATH, "cmapper" )
MR_MRCAP_PATH = os.path.join(MR_BASE_PATH, "mrcap" )
# Make the project packages importable.  Use sys.path directly instead of the
# non-idiomatic os.sys.path alias.
sys.path += [ MR_BASE_PATH, MR_CMAPPER_PATH, MR_MRCAP_PATH ]
| openconnectome/m2g | MR-OCP/MROCPdjango/pipeline/utils/__init__.py | Python | apache-2.0 | 271 |
#!/user/bin/python
'''
This script uses SimpleCV to grab an image from the camera and numpy to find an infrared LED and report its position relative to the camera view centre and whether it is inside the target area.
Attempted stabilisation of the output by tracking a circular object instead and altering exposure of the camera.
'''
# make it possible to import from parent directory:
import sys
sys.path.insert(0,'..')
## Change terminal window header for easier identification of contents
sys.stdout.write("\x1b]2;Sensors/simpleCV_3.py\x07")
import time, math, SimpleCV
import zmq, json
import subprocess as sp
from globalVars import CHANNEL_TARGETDATA
from globalVars import CAMERA_ID_NUMBER
printing = True  # NOTE(review): never read in this file
dpx = 0.0025 # approximate amount of degrees per pixel for Trust eLight
width = 1920
height = 1080
camera_id = 'video' + str(CAMERA_ID_NUMBER)
# To increase framerate, count the search() loops and render every n frames
renderFrame = 5
frame = 0
# Adjust camera settings from OS, since SimpleCV's commands don't do anything:
# (shell=True with interpolated camera_id -- safe only because CAMERA_ID_NUMBER
# is a local constant, not external input)
sp.call(["uvcdynctrl -d '"+camera_id+"' -s 'Exposure, Auto' 1"], shell = True) # Disable auto exposure
sp.call(["uvcdynctrl -d '"+camera_id+"' -s 'Exposure (Absolute)' 12"], shell = True) # Set absolute exposure
display = SimpleCV.Display()
cam = SimpleCV.Camera(CAMERA_ID_NUMBER, {"width":width,"height":height})
#target box for the marker: (min, max) pixel bounds around the image centre
box_d = 20
yTgt = (height/2-box_d, height/2+box_d)
xTgt = (width/2-box_d, width/2+box_d)
box_clr = SimpleCV.Color.RED
centre = (height/2, width/2)  # NOTE(review): (y, x) order; appears unused below
# ZeroMQ publisher that broadcasts target positions to subscribers
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind(CHANNEL_TARGETDATA)
def search():
    """Grab a camera frame, locate the brightest blob and return its centre.

    Returns:
        (x, y) pixel coordinates of the last-found blob's centre, or None
        when no blob is detected.

    The annotated image is only rendered every `renderFrame` iterations to
    keep the processing frame rate up.
    """
    global frame, renderFrame
    img = cam.getImage()
    # Distance from pure white, inverted: bright (IR-lit) spots score high.
    objective = img.colorDistance(color=(255,255,255)).invert()
    seg_objective = objective.stretch(200,255)
    blobs = seg_objective.findBlobs()
    if blobs:
        center_point = (blobs[-1].x, blobs[-1].y)
        # Fixed: compare with == instead of `is`; identity comparison of ints
        # only worked by accident of CPython's small-integer caching.
        if frame == renderFrame:
            img.drawCircle((blobs[-1].x, blobs[-1].y), 10,SimpleCV.Color.YELLOW,3)
            img.dl().rectangle2pts((xTgt[0], yTgt[0]), (xTgt[1],yTgt[1]), box_clr)
            img.show()
            frame = 0
        frame +=1
        return center_point
    if frame == renderFrame:
        img.dl().rectangle2pts((xTgt[0], yTgt[0]), (xTgt[1],yTgt[1]), box_clr)
        img.show()
        frame = 0
    frame +=1
    return None
def millis():
    """Return the current wall-clock time in whole milliseconds."""
    return int(round(time.time() * 1000))
#############################################################
#                    RUNNING CODE BELOW                     #
#############################################################
# Main tracking loop: find the marker, convert its pixel offset from the
# image centre into degrees, and publish the result over ZeroMQ.
tar_x = 0
tar_y = 0
deg_x = 0
deg_y = 0
last_tar = tar_x  # NOTE(review): never read afterwards
found = False
findTime = 0
lastFound = findTime
lossReported = False  # NOTE(review): never set True below, so messages are always sent
while display.isNotDone():
    target = search()
    if target is not None:
        # Offset of the marker from the image centre, in pixels.
        tar_x = target[0]-width/2
        tar_y = target[1]-height/2
        findTime = millis()
        found = True
        lossReported = False
    else:
        found = False
        lastFound = findTime
    # Angular difference between the box and the target
    # Having the target within the box is acceptable
    if abs(tar_x) > box_d:
        deg_x = tar_x * dpx
    else:
        deg_x = 0
    if abs(tar_y) > box_d:
        deg_y = tar_y * dpx
    else:
        deg_y = 0
    # If the target is in the box, indicate this with the box colour
    # NOTE(review): `is 0` relies on CPython small-int identity; deg_x/deg_y
    # are ints (0) only on the else branches above -- fragile, should be ==.
    if deg_y is 0 and deg_x is 0 and found:
        box_clr = SimpleCV.Color.GREEN
    else:
        box_clr = SimpleCV.Color.RED
    #output the data
    # not needed if there's no new data to report
    if not lossReported:
        message = {
            't' : millis(),
            'findTime': findTime,
            'found' : found,
            'tar_px' : {'x':tar_x, 'y':tar_y},
            'tar_dg' : {'x':deg_x, 'y':deg_y}
            }
        # wait 20 ms to make sure Scan picks up on the last hit
        # NOTE(review): comment says 20 ms but the code checks 100 ms.
        if not found and millis() - findTime < 100:
            continue
        socket.send_json(message)
        print "Sent targetData: ",
        print message
    if lastFound == findTime:
        lossReported = False
    #spam to keep data flowing
| dotCID/Graduation | Robot code/Sensors/simpleCV_3.py | Python | gpl-2.0 | 4,363 |
"""
RPyC connection factories: ease the creation of a connection for the common
cases)
"""
import socket
import threading
try:
from thread import interrupt_main
except ImportError:
try:
from _thread import interrupt_main
except ImportError:
# assume jython (#83)
from java.lang import System
interrupt_main = System.exit
from rpyc import Connection, Channel, SocketStream, TunneledSocketStream, PipeStream, VoidService
from rpyc.utils.registry import UDPRegistryClient
from rpyc.lib import safe_import
ssl = safe_import("ssl")
class DiscoveryError(Exception):
    """Raised by :func:`discover` when no server exposing the requested
    service can be found in the registry."""
    pass
#------------------------------------------------------------------------------
# API
#------------------------------------------------------------------------------
def connect_channel(channel, service = VoidService, config = {}):
    """wrap an existing channel in an RPyC connection

    :param channel: the channel to use
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :returns: an RPyC connection
    """
    conn = Connection(service, channel, config = config)
    return conn
def connect_stream(stream, service = VoidService, config = {}):
    """wrap an existing stream in a channel and build a connection on top

    :param stream: the stream to use
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :returns: an RPyC connection
    """
    chan = Channel(stream)
    return connect_channel(chan, service = service, config = config)
def connect_pipes(input, output, service = VoidService, config = {}):
    """
    build a connection over an already-open pair of pipes

    :param input: the input pipe
    :param output: the output pipe
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :returns: an RPyC connection
    """
    stream = PipeStream(input, output)
    return connect_stream(stream, service = service, config = config)
def connect_stdpipes(service = VoidService, config = {}):
    """
    build a connection over this process' standard input/output pipes

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :returns: an RPyC connection
    """
    stdio = PipeStream.from_std()
    return connect_stream(stdio, service = service, config = config)
def connect(host, port, service = VoidService, config = {}, ipv6 = False, keepalive = False):
    """
    open a plain TCP socket to the given host and port and wrap it in a
    connection

    :param host: the hostname to connect to
    :param port: the TCP port to use
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param ipv6: whether to use IPv6 or not
    :param keepalive: whether to enable TCP keepalive on the socket

    :returns: an RPyC connection
    """
    stream = SocketStream.connect(host, port, ipv6 = ipv6, keepalive = keepalive)
    return connect_stream(stream, service, config)
def ssl_connect(host, port, keyfile = None, certfile = None, ca_certs = None,
        cert_reqs = None, ssl_version = None, ciphers = None,
        service = VoidService, config = {}, ipv6 = False, keepalive = False):
    """
    creates an SSL-wrapped connection to the given host (encrypted and
    authenticated).

    :param host: the hostname to connect to
    :param port: the TCP port to use
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param ipv6: whether to create an IPv6 socket or an IPv4 one
    :param keepalive: whether to enable TCP keepalive on the socket

    The following arguments are passed directly to
    `ssl.wrap_socket <http://docs.python.org/dev/library/ssl.html#ssl.wrap_socket>`_:

    :param keyfile: see ``ssl.wrap_socket``. May be ``None``
    :param certfile: see ``ssl.wrap_socket``. May be ``None``
    :param ca_certs: see ``ssl.wrap_socket``. May be ``None``
    :param cert_reqs: see ``ssl.wrap_socket``. By default, if ``ca_cert`` is specified,
                      the requirement is set to ``CERT_REQUIRED``; otherwise it is
                      set to ``CERT_NONE``
    :param ssl_version: see ``ssl.wrap_socket``. The default is ``PROTOCOL_TLSv1``
    :param ciphers: see ``ssl.wrap_socket``. May be ``None``. New in Python 2.7/3.2

    :returns: an RPyC connection
    """
    # Assemble only the kwargs the caller actually supplied, so that
    # ssl.wrap_socket falls back to its own defaults for the rest.
    ssl_kwargs = {"server_side" : False}
    if keyfile is not None:
        ssl_kwargs["keyfile"] = keyfile
    if certfile is not None:
        ssl_kwargs["certfile"] = certfile
    if ca_certs is not None:
        ssl_kwargs["ca_certs"] = ca_certs
        # Providing a CA bundle implies we must verify the peer certificate.
        ssl_kwargs["cert_reqs"] = ssl.CERT_REQUIRED
    if cert_reqs is not None:
        # An explicit cert_reqs overrides the ca_certs-derived default above.
        ssl_kwargs["cert_reqs"] = cert_reqs
    if ssl_version is None:
        # NOTE(review): TLSv1 is a legacy protocol default; callers should
        # pass a modern ssl_version explicitly.  Behavior kept unchanged here.
        ssl_kwargs["ssl_version"] = ssl.PROTOCOL_TLSv1
    else:
        ssl_kwargs["ssl_version"] = ssl_version
    if ciphers is not None:
        ssl_kwargs["ciphers"] = ciphers
    s = SocketStream.ssl_connect(host, port, ssl_kwargs, ipv6 = ipv6, keepalive = keepalive)
    return connect_stream(s, service, config)
def _get_free_port():
"""attempts to find a free port"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", 0))
_, port = s.getsockname()
s.close()
return port
# Serializes port allocation + tunnel setup so concurrent ssh_connect calls
# don't race each other for the same local port.
_ssh_connect_lock = threading.Lock()
def ssh_connect(remote_machine, remote_port, service = VoidService, config = {}):
    """
    Connects to an RPyC server over an SSH tunnel (created by plumbum).
    See `Plumbum tunneling <http://plumbum.readthedocs.org/en/latest/remote.html#tunneling>`_
    for further details.

    .. note::
       This function attempts to allocate a free TCP port for the underlying tunnel, but doing
       so is inherently prone to a race condition with other processes who might bind the
       same port before sshd does. Albeit unlikely, there is no sure way around it.

    :param remote_machine: an :class:`plumbum.remote.RemoteMachine` instance
    :param remote_port: the port of the remote server
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :returns: an RPyC connection
    """
    with _ssh_connect_lock:
        loc_port = _get_free_port()
        tun = remote_machine.tunnel(loc_port, remote_port)
        stream = TunneledSocketStream.connect("localhost", loc_port)
        # Keep a reference to the tunnel on the stream so it lives as long
        # as the connection does.
        stream.tun = tun
    return Connection(service, Channel(stream), config = config)
def discover(service_name, host = None, registrar = None, timeout = 2):
    """locate servers that expose the given service

    :param service_name: the service to look for
    :param host: restrict results to this host only (None means any host)
    :param registrar: registry client used for the lookup; when None a
                      default UDPRegistryClient is created
    :param timeout: number of seconds to wait for the registry's reply

    :raises: ``DiscoveryError`` if no server is found
    :returns: a list of (ip, port) pairs
    """
    if registrar is None:
        registrar = UDPRegistryClient(timeout = timeout)
    addrs = registrar.discover(service_name)
    if not addrs:
        raise DiscoveryError("no servers exposing %r were found" % (service_name,))
    if host:
        # Keep only the servers whose address resolves to the requested host.
        ips = socket.gethostbyname_ex(host)[2]
        addrs = [pair for pair in addrs if pair[0] in ips]
        if not addrs:
            raise DiscoveryError("no servers exposing %r were found on %r" % (service_name, host))
    return addrs
def connect_by_service(service_name, host = None, service = VoidService, config = {}):
    """discover servers exposing *service_name* and connect to the first one

    :param service_name: the service to discover
    :param host: limit discovery to the given host only (None means any host)
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict

    :raises: ``DiscoveryError`` if no server is found
    :returns: an RPyC connection
    """
    addrs = discover(service_name, host = host)
    chosen_host, chosen_port = addrs[0]
    return connect(chosen_host, chosen_port, service, config = config)
def connect_subproc(args, service = VoidService, config = {}):
    """spawn a child process running an rpyc server and connect to it over
    its stdio pipes (uses the subprocess module)

    :param args: the args to Popen, e.g., ["python", "-u", "myfile.py"]
    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    """
    from subprocess import Popen, PIPE
    proc = Popen(args, stdin = PIPE, stdout = PIPE)
    conn = connect_pipes(proc.stdout, proc.stdin, service = service, config = config)
    # Expose the child-process handle so the caller can manage its lifetime.
    conn.proc = proc
    return conn
def connect_thread(service = VoidService, config = {}, remote_service = VoidService, remote_config = {}):
    """starts an rpyc server on a new thread, bound to an arbitrary port,
    and connects to it over a socket.

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param remote_service: the remote service to expose (of the server; defaults to Void)
    :param remote_config: remote configuration dict (of the server)
    """
    listener = socket.socket()
    listener.bind(("localhost", 0))
    listener.listen(1)
    def server(listener = listener):
        # Accept exactly one client, then stop listening.
        client = listener.accept()[0]
        listener.close()
        conn = connect_stream(SocketStream(client), service = remote_service,
            config = remote_config)
        try:
            conn.serve_all()
        except KeyboardInterrupt:
            interrupt_main()
    t = threading.Thread(target = server)
    # Daemon thread: it must not keep the interpreter alive on exit.
    t.setDaemon(True)
    t.start()
    host, port = listener.getsockname()
    return connect(host, port, service = service, config = config)
def connect_multiprocess(service = VoidService, config = {}, remote_service = VoidService, remote_config = {}, args={}):
    """starts an rpyc server on a new process, bound to an arbitrary port,
    and connects to it over a socket. Basically a copy of connect_thread().
    However if args is used and if these are shared memory then changes
    will be bi-directional. That is we now have access to shared memmory.

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param remote_service: the remote service to expose (of the server; defaults to Void)
    :param remote_config: remote configuration dict (of the server)
    :param args: dict of local vars to pass to new connection, form {'name':var}

    Contributed by *@tvanzyl*
    """
    from multiprocessing import Process
    listener = socket.socket()
    listener.bind(("localhost", 0))
    listener.listen(1)
    def server(listener=listener, args=args):
        # Accept exactly one client, then stop listening.
        client = listener.accept()[0]
        listener.close()
        conn = connect_stream(SocketStream(client), service = remote_service, config = remote_config)
        try:
            # Seed the server's exposed namespace with the caller-provided vars.
            for k in args:
                conn._local_root.exposed_namespace[k] = args[k]
            conn.serve_all()
        except KeyboardInterrupt:
            interrupt_main()
    t = Process(target = server)
    t.start()
    host, port = listener.getsockname()
    return connect(host, port, service = service, config = config)
| sovaa/backdoorme | rpyc/utils/factory.py | Python | mit | 11,382 |
#!/usr/bin/env python
# Opens SSH sessions (via netmiko) to two Cisco IOS routers and a Juniper SRX,
# then enters configuration mode on the first router and reports it.
from netmiko import ConnectHandler
from getpass import getpass  # NOTE(review): imported but unused -- password is hardcoded below
# SECURITY NOTE(review): hardcoded credential committed to source; prefer
# password = getpass() or an environment variable.
password = '88newclass'
# Connection parameters for the first Cisco IOS router
pynet1 = {
    'device_type': 'cisco_ios',
    'ip': '184.105.247.70',
    'username': 'pyclass',
    'password': password,
    'port': 22
}
# Connection parameters for the second Cisco IOS router
pynet2 = {
    'device_type': 'cisco_ios',
    'ip': '184.105.247.71',
    'username': 'pyclass',
    'password': password,
    'secret': '',
    'port': 22
}
# Connection parameters for the Juniper SRX
juniper_srx = {
    'device_type': 'juniper',
    'ip': '184.105.247.76',
    'username': 'pyclass',
    'password': password,
    'port': 22
}
# Establish the three SSH sessions
pynet_rtr1 = ConnectHandler(**pynet1)
pynet_rtr2 = ConnectHandler(**pynet2)
pynet_srx1 = ConnectHandler(**juniper_srx)
# Enter configuration mode on rtr1 and verify we got there
pynet_rtr1.config_mode()
if(pynet_rtr1.check_config_mode() == True):  # NOTE(review): `== True` is redundant
    print("Currently in config mode")
| gerards/pynet_network_automation_course | week4/q5_netmiko.py | Python | apache-2.0 | 737 |
"""
Implement python 3.8+ bytecode analysis
"""
from pprint import pformat
import logging
from collections import namedtuple, defaultdict, deque
from functools import total_ordering
from numba.core.utils import UniqueDict, PYVERSION
from numba.core.controlflow import NEW_BLOCKERS, CFGraph
from numba.core.ir import Loc
from numba.core.errors import UnsupportedError
_logger = logging.getLogger(__name__)
# Number of stack entries associated with an exception-handler block
# (used as the npush amount when forking into an EXCEPT handler).
_EXCEPT_STACK_OFFSET = 6
# Entries pushed for a FINALLY handler: py3.8+ pushes the full exception
# block, earlier versions push a single value.
_FINALLY_POP = _EXCEPT_STACK_OFFSET if PYVERSION >= (3, 8) else 1
# Opcodes that can never raise, so an active try-block need not fork for them.
_NO_RAISE_OPS = frozenset({
    'LOAD_CONST',
})
@total_ordering
class BlockKind(object):
    """A tag for the kind of a block; a closed set of values that is safer
    to pass around than a raw ``str``.
    """
    _members = frozenset({
        'LOOP',
        'TRY', 'EXCEPT', 'FINALLY',
        'WITH', 'WITH_FINALLY',
    })

    def __init__(self, value):
        assert value in self._members
        self._value = value

    def __hash__(self):
        # Include the type so BlockKind never hash-collides with plain strings.
        return hash((type(self), self._value))

    def __lt__(self, other):
        if not isinstance(other, BlockKind):
            raise TypeError('cannot compare to {!r}'.format(type(other)))
        return self._value < other._value

    def __eq__(self, other):
        if not isinstance(other, BlockKind):
            raise TypeError('cannot compare to {!r}'.format(type(other)))
        return self._value == other._value

    def __repr__(self):
        return "BlockKind({})".format(self._value)
class _lazy_pformat(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __str__(self):
return pformat(*self.args, **self.kwargs)
class Flow(object):
    """Data+Control Flow analysis.
    Simulate execution to recover dataflow and controlflow information.
    """
    def __init__(self, bytecode):
        _logger.debug("bytecode dump:\n%s", bytecode.dump())
        self._bytecode = bytecode
        # Map of basic-block start PC -> adapted block info; filled by run().
        self.block_infos = UniqueDict()
    def run(self):
        """Run a trace over the bytecode over all reachable path.
        The trace starts at bytecode offset 0 and gathers stack and control-
        flow information by partially interpreting each bytecode.
        Each ``State`` instance in the trace corresponds to a basic-block.
        The State instances forks when a jump instruction is encountered.
        A newly forked state is then added to the list of pending states.
        The trace ends when there are no more pending states.
        """
        firststate = State(bytecode=self._bytecode, pc=0, nstack=0,
                           blockstack=())
        runner = TraceRunner(debug_filename=self._bytecode.func_id.filename)
        runner.pending.append(firststate)
        # Enforce unique-ness on initial PC to avoid re-entering the PC with
        # a different stack-depth. We don't know if such a case is ever
        # possible, but no such case has been encountered in our tests.
        first_encounter = UniqueDict()
        # Loop over each pending state at a initial PC.
        # Each state is tracing a basic block
        while runner.pending:
            _logger.debug("pending: %s", runner.pending)
            state = runner.pending.popleft()
            if state not in runner.finished:
                _logger.debug("stack: %s", state._stack)
                first_encounter[state.pc_initial] = state
                # Loop over the state until it is terminated.
                while True:
                    runner.dispatch(state)
                    # Terminated?
                    if state.has_terminated():
                        break
                    elif (state.has_active_try() and
                            state.get_inst().opname not in _NO_RAISE_OPS):
                        # Is in a *try* block: the current instruction may
                        # raise, so also fork into the exception handler.
                        state.fork(pc=state.get_inst().next)
                        tryblk = state.get_top_block('TRY')
                        state.pop_block_and_above(tryblk)
                        nstack = state.stack_depth
                        kwargs = {}
                        # Drop any stack entries pushed since entering the try.
                        if nstack > tryblk['entry_stack']:
                            kwargs['npop'] = nstack - tryblk['entry_stack']
                        handler = tryblk['handler']
                        # Handler kind determines how many entries it pushes.
                        kwargs['npush'] = {
                            BlockKind('EXCEPT'): _EXCEPT_STACK_OFFSET,
                            BlockKind('FINALLY'): _FINALLY_POP
                        }[handler['kind']]
                        kwargs['extra_block'] = handler
                        state.fork(pc=tryblk['end'], **kwargs)
                        break
                    else:
                        state.advance_pc()
                        # Must the new PC be a new block?
                        if self._is_implicit_new_block(state):
                            state.split_new_block()
                            break
                _logger.debug("end state. edges=%s", state.outgoing_edges)
                runner.finished.add(state)
                out_states = state.get_outgoing_states()
                runner.pending.extend(out_states)
        # Complete controlflow
        self._build_cfg(runner.finished)
        # Prune redundant PHI-nodes
        self._prune_phis(runner)
        # Post process
        for state in sorted(runner.finished, key=lambda x: x.pc_initial):
            self.block_infos[state.pc_initial] = si = adapt_state_infos(state)
            _logger.debug("block_infos %s:\n%s", state, si)
    def _build_cfg(self, all_states):
        """Assemble a CFGraph from the traced states: one node per block
        start PC, one edge per recorded outgoing edge."""
        graph = CFGraph()
        for state in all_states:
            b = state.pc_initial
            graph.add_node(b)
        for state in all_states:
            for edge in state.outgoing_edges:
                graph.add_edge(state.pc_initial, edge.pc, 0)
        graph.set_entry_point(0)
        graph.process()
        self.cfgraph = graph
    def _prune_phis(self, runner):
        """Remove PHI nodes that no block actually uses, rewriting each
        state's outgoing PHI map to reference real definitions only."""
        # Find phis that are unused in the local block
        _logger.debug("Prune PHIs".center(60, '-'))
        # Compute dataflow for used phis and propagate
        # 1. Get used-phis for each block
        # Map block to used_phis
        def get_used_phis_per_state():
            # Returns (per-state used phis, the set of all phi names).
            used_phis = defaultdict(set)
            phi_set = set()
            for state in runner.finished:
                used = set(state._used_regs)
                phis = set(state._phis)
                used_phis[state] |= phis & used
                phi_set |= phis
            return used_phis, phi_set
        # Find use-defs
        def find_use_defs():
            defmap = {}
            phismap = defaultdict(set)
            for state in runner.finished:
                for phi, rhs in state._outgoing_phis.items():
                    if rhs not in phi_set:
                        # Is a definition
                        defmap[phi] = state
                    phismap[phi].add((rhs, state))
            _logger.debug("defmap: %s", _lazy_pformat(defmap))
            _logger.debug("phismap: %s", _lazy_pformat(phismap))
            return defmap, phismap
        def propagate_phi_map(phismap):
            """An iterative dataflow algorithm to find the definition
            (the source) of each PHI node.
            """
            # phi-valued defsites are expanded in place and blacklisted so
            # each phi eventually maps only to non-phi definitions.
            blacklist = defaultdict(set)
            while True:
                changing = False
                for phi, defsites in sorted(list(phismap.items())):
                    for rhs, state in sorted(list(defsites)):
                        if rhs in phi_set:
                            defsites |= phismap[rhs]
                            blacklist[phi].add((rhs, state))
                    to_remove = blacklist[phi]
                    if to_remove & defsites:
                        defsites -= to_remove
                        changing = True
                _logger.debug("changing phismap: %s", _lazy_pformat(phismap))
                if not changing:
                    break
        def apply_changes(used_phis, phismap):
            # Keep only phis that some block uses, then rewrite each state's
            # outgoing phis to the propagated definitions.
            keep = {}
            for state, used_set in used_phis.items():
                for phi in used_set:
                    keep[phi] = phismap[phi]
            _logger.debug("keep phismap: %s", _lazy_pformat(keep))
            new_out = defaultdict(dict)
            for phi in keep:
                for rhs, state in keep[phi]:
                    new_out[state][phi] = rhs
            _logger.debug("new_out: %s", _lazy_pformat(new_out))
            for state in runner.finished:
                state._outgoing_phis.clear()
                state._outgoing_phis.update(new_out[state])
        used_phis, phi_set = get_used_phis_per_state()
        _logger.debug("Used_phis: %s", _lazy_pformat(used_phis))
        defmap, phismap = find_use_defs()
        propagate_phi_map(phismap)
        apply_changes(used_phis, phismap)
        _logger.debug("DONE Prune PHIs".center(60, '-'))
    def _is_implicit_new_block(self, state):
        """True when the instruction at the current PC must begin a new
        basic block (it is a jump target or a known block-starting opcode)."""
        inst = state.get_inst()
        if inst.offset in self._bytecode.labels:
            return True
        elif inst.opname in NEW_BLOCKERS:
            return True
        else:
            return False
class TraceRunner(object):
"""Trace runner contains the states for the trace and the opcode dispatch.
"""
    def __init__(self, debug_filename):
        # Source filename used when building debug locations for errors.
        self.debug_filename = debug_filename
        # FIFO of states (basic blocks) still to be traced.
        self.pending = deque()
        # States whose trace has completed.
        self.finished = set()
    def get_debug_loc(self, lineno):
        """Build a :class:`Loc` for error reporting at *lineno*."""
        return Loc(self.debug_filename, lineno)
    def dispatch(self, state):
        """Decode the instruction at *state*'s PC and route it to the
        matching ``op_<OPNAME>`` handler method; unknown opcodes raise
        :class:`UnsupportedError`."""
        inst = state.get_inst()
        _logger.debug("dispatch pc=%s, inst=%s", state._pc, inst)
        _logger.debug("stack %s", state._stack)
        fn = getattr(self, "op_{}".format(inst.opname), None)
        if fn is not None:
            fn(state, inst)
        else:
            msg = "Use of unsupported opcode (%s) found" % inst.opname
            raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno))
    # --- Simple load/store/delete opcode handlers ---------------------------
    # Each handler mirrors the opcode's stack effect on the abstract stack
    # and records the instruction (plus its operand registers) on the state.
    def op_NOP(self, state, inst):
        state.append(inst)
    def op_POP_TOP(self, state, inst):
        state.pop()
    def op_LOAD_GLOBAL(self, state, inst):
        res = state.make_temp()
        state.append(inst, res=res)
        state.push(res)
    def op_LOAD_DEREF(self, state, inst):
        res = state.make_temp()
        state.append(inst, res=res)
        state.push(res)
    def op_LOAD_CONST(self, state, inst):
        res = state.make_temp("const")
        state.push(res)
        state.append(inst, res=res)
    def op_LOAD_ATTR(self, state, inst):
        item = state.pop()
        res = state.make_temp()
        state.append(inst, item=item, res=res)
        state.push(res)
    def op_LOAD_FAST(self, state, inst):
        # Name the temporary after the local variable for readable IR.
        name = state.get_varname(inst)
        res = state.make_temp(name)
        state.append(inst, res=res)
        state.push(res)
    def op_DELETE_FAST(self, state, inst):
        state.append(inst)
    def op_DELETE_ATTR(self, state, inst):
        target = state.pop()
        state.append(inst, target=target)
    def op_STORE_ATTR(self, state, inst):
        target = state.pop()
        value = state.pop()
        state.append(inst, target=target, value=value)
    def op_STORE_DEREF(self, state, inst):
        value = state.pop()
        state.append(inst, value=value)
    def op_STORE_FAST(self, state, inst):
        value = state.pop()
        state.append(inst, value=value)
def op_SLICE_1(self, state, inst):
"""
TOS = TOS1[TOS:]
"""
tos = state.pop()
tos1 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
state.push(res)
def op_SLICE_2(self, state, inst):
"""
TOS = TOS1[:TOS]
"""
tos = state.pop()
tos1 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
state.push(res)
def op_SLICE_3(self, state, inst):
"""
TOS = TOS2[TOS1:TOS]
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
res = state.make_temp()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst,
base=tos2,
start=tos1,
stop=tos,
res=res,
slicevar=slicevar,
indexvar=indexvar,
)
state.push(res)
def op_STORE_SLICE_0(self, state, inst):
"""
TOS[:] = TOS1
"""
tos = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_1(self, state, inst):
"""
TOS1[TOS:] = TOS2
"""
tos = state.pop()
tos1 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
slicevar=slicevar,
value=value,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_2(self, state, inst):
"""
TOS1[:TOS] = TOS2
"""
tos = state.pop()
tos1 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_STORE_SLICE_3(self, state, inst):
"""
TOS2[TOS1:TOS] = TOS3
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
value = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst,
base=tos2,
start=tos1,
stop=tos,
value=value,
slicevar=slicevar,
indexvar=indexvar,
)
def op_DELETE_SLICE_0(self, state, inst):
"""
del TOS[:]
"""
tos = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst, base=tos, slicevar=slicevar, indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_1(self, state, inst):
"""
del TOS1[TOS:]
"""
tos = state.pop()
tos1 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
start=tos,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_2(self, state, inst):
"""
del TOS1[:TOS]
"""
tos = state.pop()
tos1 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
nonevar = state.make_temp()
state.append(
inst,
base=tos1,
stop=tos,
slicevar=slicevar,
indexvar=indexvar,
nonevar=nonevar,
)
def op_DELETE_SLICE_3(self, state, inst):
"""
del TOS2[TOS1:TOS]
"""
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
slicevar = state.make_temp()
indexvar = state.make_temp()
state.append(
inst, base=tos2, start=tos1, stop=tos, slicevar=slicevar,
indexvar=indexvar
)
def op_BUILD_SLICE(self, state, inst):
"""
slice(TOS1, TOS) or slice(TOS2, TOS1, TOS)
"""
argc = inst.arg
if argc == 2:
tos = state.pop()
tos1 = state.pop()
start = tos1
stop = tos
step = None
elif argc == 3:
tos = state.pop()
tos1 = state.pop()
tos2 = state.pop()
start = tos2
stop = tos1
step = tos
else:
raise Exception("unreachable")
slicevar = state.make_temp()
res = state.make_temp()
state.append(
inst, start=start, stop=stop, step=step, res=res, slicevar=slicevar
)
state.push(res)
    def _op_POP_JUMP_IF(self, state, inst):
        # Conditional jump that always pops the predicate: fork the trace
        # into both the fall-through and the jump-target successors.
        pred = state.pop()
        state.append(inst, pred=pred)
        state.fork(pc=inst.next)
        state.fork(pc=inst.get_jump_target())
    op_POP_JUMP_IF_TRUE = _op_POP_JUMP_IF
    op_POP_JUMP_IF_FALSE = _op_POP_JUMP_IF
    def _op_JUMP_IF_OR_POP(self, state, inst):
        # Conditional jump that pops only on fall-through (npop=1); the
        # predicate stays on the stack along the jump edge.
        pred = state.get_tos()
        state.append(inst, pred=pred)
        state.fork(pc=inst.next, npop=1)
        state.fork(pc=inst.get_jump_target())
    op_JUMP_IF_FALSE_OR_POP = _op_JUMP_IF_OR_POP
    op_JUMP_IF_TRUE_OR_POP = _op_JUMP_IF_OR_POP
    def op_JUMP_FORWARD(self, state, inst):
        state.append(inst)
        state.fork(pc=inst.get_jump_target())
    def op_JUMP_ABSOLUTE(self, state, inst):
        state.append(inst)
        state.fork(pc=inst.get_jump_target())
    def op_BREAK_LOOP(self, state, inst):
        # NOTE: bytecode removed since py3.8
        end = state.get_top_block('LOOP')['end']
        state.append(inst, end=end)
        state.pop_block()
        state.fork(pc=end)
    def op_RETURN_VALUE(self, state, inst):
        # castval holds the return value after any implicit cast.
        state.append(inst, retval=state.pop(), castval=state.make_temp())
        state.terminate()
    def op_YIELD_VALUE(self, state, inst):
        val = state.pop()
        res = state.make_temp()
        state.append(inst, value=val, res=res)
        state.push(res)
    def op_RAISE_VARARGS(self, state, inst):
        """Handle an explicit ``raise``: only bare ``raise`` (arg == 0,
        outside except/finally blocks) and ``raise exc`` (arg == 1) are
        supported; the block terminates afterwards."""
        in_exc_block = any([
            state.get_top_block("EXCEPT") is not None,
            state.get_top_block("FINALLY") is not None
        ])
        if inst.arg == 0:
            exc = None
            # Bare re-raise inside an exception block is not supported.
            if in_exc_block:
                raise UnsupportedError(
                    "The re-raising of an exception is not yet supported.",
                    loc=self.get_debug_loc(inst.lineno),
                )
        elif inst.arg == 1:
            exc = state.pop()
        else:
            # arg >= 2 corresponds to multi-argument raise forms.
            raise ValueError("Multiple argument raise is not supported.")
        state.append(inst, exc=exc)
        state.terminate()
def op_BEGIN_FINALLY(self, state, inst):
temps = []
for i in range(_EXCEPT_STACK_OFFSET):
tmp = state.make_temp()
temps.append(tmp)
state.push(tmp)
state.append(inst, temps=temps)
def op_END_FINALLY(self, state, inst):
blk = state.pop_block()
state.reset_stack(blk['entry_stack'])
def op_POP_FINALLY(self, state, inst):
# we don't emulate the exact stack behavior
if inst.arg != 0:
msg = ('Unsupported use of a bytecode related to try..finally'
' or a with-context')
raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno))
def op_CALL_FINALLY(self, state, inst):
pass
def op_WITH_CLEANUP_START(self, state, inst):
# we don't emulate the exact stack behavior
state.append(inst)
def op_WITH_CLEANUP_FINISH(self, state, inst):
# we don't emulate the exact stack behavior
state.append(inst)
def op_SETUP_LOOP(self, state, inst):
# NOTE: bytecode removed since py3.8
state.push_block(
state.make_block(
kind='LOOP',
end=inst.get_jump_target(),
)
)
def op_SETUP_WITH(self, state, inst):
cm = state.pop() # the context-manager
yielded = state.make_temp()
state.append(inst, contextmanager=cm)
state.push_block(
state.make_block(
kind='WITH_FINALLY',
end=inst.get_jump_target(),
)
)
state.push(cm)
state.push(yielded)
state.push_block(
state.make_block(
kind='WITH',
end=inst.get_jump_target(),
)
)
# Forces a new block
state.fork(pc=inst.next)
def _setup_try(self, kind, state, next, end):
handler_block = state.make_block(
kind=kind,
end=None,
reset_stack=False,
)
# Forces a new block
# Fork to the body of the finally
state.fork(
pc=next,
extra_block=state.make_block(
kind='TRY',
end=end,
reset_stack=False,
handler=handler_block,
)
)
def op_SETUP_EXCEPT(self, state, inst):
# Opcode removed since py3.8
state.append(inst)
self._setup_try(
'EXCEPT', state, next=inst.next, end=inst.get_jump_target(),
)
def op_SETUP_FINALLY(self, state, inst):
state.append(inst)
self._setup_try(
'FINALLY', state, next=inst.next, end=inst.get_jump_target(),
)
def op_POP_EXCEPT(self, state, inst):
blk = state.pop_block()
if blk['kind'] not in {BlockKind('EXCEPT'), BlockKind('FINALLY')}:
raise UnsupportedError(
"POP_EXCEPT got an unexpected block: {}".format(blk['kind']),
loc=self.get_debug_loc(inst.lineno),
)
state.pop()
state.pop()
state.pop()
# Forces a new block
state.fork(pc=inst.next)
def op_POP_BLOCK(self, state, inst):
blk = state.pop_block()
if blk['kind'] == BlockKind('TRY'):
state.append(inst, kind='try')
# Forces a new block
state.fork(pc=inst.next)
def op_BINARY_SUBSCR(self, state, inst):
index = state.pop()
target = state.pop()
res = state.make_temp()
state.append(inst, index=index, target=target, res=res)
state.push(res)
def op_STORE_SUBSCR(self, state, inst):
index = state.pop()
target = state.pop()
value = state.pop()
state.append(inst, target=target, index=index, value=value)
def op_DELETE_SUBSCR(self, state, inst):
index = state.pop()
target = state.pop()
state.append(inst, target=target, index=index)
def op_CALL_FUNCTION(self, state, inst):
narg = inst.arg
args = list(reversed([state.pop() for _ in range(narg)]))
func = state.pop()
res = state.make_temp()
state.append(inst, func=func, args=args, res=res)
state.push(res)
def op_CALL_FUNCTION_KW(self, state, inst):
narg = inst.arg
names = state.pop() # tuple of names
args = list(reversed([state.pop() for _ in range(narg)]))
func = state.pop()
res = state.make_temp()
state.append(inst, func=func, args=args, names=names, res=res)
state.push(res)
def op_CALL_FUNCTION_EX(self, state, inst):
if inst.arg & 1:
errmsg = "CALL_FUNCTION_EX with **kwargs not supported"
raise UnsupportedError(errmsg)
vararg = state.pop()
func = state.pop()
res = state.make_temp()
state.append(inst, func=func, vararg=vararg, res=res)
state.push(res)
def _dup_topx(self, state, inst, count):
orig = [state.pop() for _ in range(count)]
orig.reverse()
# We need to actually create new temporaries if we want the
# IR optimization pass to work correctly (see issue #580)
duped = [state.make_temp() for _ in range(count)]
state.append(inst, orig=orig, duped=duped)
for val in orig:
state.push(val)
for val in duped:
state.push(val)
def op_DUP_TOPX(self, state, inst):
count = inst.arg
assert 1 <= count <= 5, "Invalid DUP_TOPX count"
self._dup_topx(state, inst, count)
    def op_DUP_TOP(self, state, inst):
        # Duplicate the top-of-stack item.
        self._dup_topx(state, inst, count=1)
    def op_DUP_TOP_TWO(self, state, inst):
        # Duplicate the two top-of-stack items, preserving order.
        self._dup_topx(state, inst, count=2)
def op_ROT_TWO(self, state, inst):
first = state.pop()
second = state.pop()
state.push(first)
state.push(second)
def op_ROT_THREE(self, state, inst):
first = state.pop()
second = state.pop()
third = state.pop()
state.push(first)
state.push(third)
state.push(second)
def op_ROT_FOUR(self, state, inst):
first = state.pop()
second = state.pop()
third = state.pop()
forth = state.pop()
state.push(first)
state.push(forth)
state.push(third)
state.push(second)
def op_UNPACK_SEQUENCE(self, state, inst):
count = inst.arg
iterable = state.pop()
stores = [state.make_temp() for _ in range(count)]
tupleobj = state.make_temp()
state.append(inst, iterable=iterable, stores=stores, tupleobj=tupleobj)
for st in reversed(stores):
state.push(st)
def op_BUILD_TUPLE(self, state, inst):
count = inst.arg
items = list(reversed([state.pop() for _ in range(count)]))
tup = state.make_temp()
state.append(inst, items=items, res=tup)
state.push(tup)
    def _build_tuple_unpack(self, state, inst):
        # Builds tuple from other tuples on the stack
        tuples = list(reversed([state.pop() for _ in range(inst.arg)]))
        # One temp per concatenation step (n tuples -> n-1 concats).
        temps = [state.make_temp() for _ in range(len(tuples) - 1)]
        state.append(inst, tuples=tuples, temps=temps)
        # The result is in the last temp var
        state.push(temps[-1])
    def op_BUILD_TUPLE_UNPACK_WITH_CALL(self, state, inst):
        # just unpack the input tuple, call inst will be handled afterwards
        self._build_tuple_unpack(state, inst)
    def op_BUILD_TUPLE_UNPACK(self, state, inst):
        # Same unpack lowering as the WITH_CALL variant.
        self._build_tuple_unpack(state, inst)
def op_BUILD_CONST_KEY_MAP(self, state, inst):
keys = state.pop()
vals = list(reversed([state.pop() for _ in range(inst.arg)]))
keytmps = [state.make_temp() for _ in range(inst.arg)]
res = state.make_temp()
state.append(inst, keys=keys, keytmps=keytmps, values=vals, res=res)
state.push(res)
def op_BUILD_LIST(self, state, inst):
count = inst.arg
items = list(reversed([state.pop() for _ in range(count)]))
lst = state.make_temp()
state.append(inst, items=items, res=lst)
state.push(lst)
    def op_LIST_APPEND(self, state, inst):
        # Append TOS to the list located *inst.arg* entries down the
        # stack (used inside list comprehensions); pushes nothing.
        value = state.pop()
        index = inst.arg
        target = state.peek(index)
        appendvar = state.make_temp()
        res = state.make_temp()
        state.append(inst, target=target, value=value, appendvar=appendvar,
                     res=res)
def op_BUILD_MAP(self, state, inst):
dct = state.make_temp()
count = inst.arg
items = []
# In 3.5+, BUILD_MAP takes <count> pairs from the stack
for i in range(count):
v, k = state.pop(), state.pop()
items.append((k, v))
state.append(inst, items=items[::-1], size=count, res=dct)
state.push(dct)
def op_BUILD_SET(self, state, inst):
count = inst.arg
# Note: related python bug http://bugs.python.org/issue26020
items = list(reversed([state.pop() for _ in range(count)]))
res = state.make_temp()
state.append(inst, items=items, res=res)
state.push(res)
def op_GET_ITER(self, state, inst):
value = state.pop()
res = state.make_temp()
state.append(inst, value=value, res=res)
state.push(res)
    def op_FOR_ITER(self, state, inst):
        # Peek the iterator (it stays on the stack between iterations).
        iterator = state.get_tos()
        pair = state.make_temp()
        indval = state.make_temp()
        pred = state.make_temp()
        state.append(inst, iterator=iterator, pair=pair, indval=indval,
                     pred=pred)
        state.push(indval)
        # Exhausted path: jump to the loop end, dropping the iterator
        # and the induction value from the stack.
        end = inst.get_jump_target()
        state.fork(pc=end, npop=2)
        # Loop-body path.
        state.fork(pc=inst.next)
def _unaryop(self, state, inst):
val = state.pop()
res = state.make_temp()
state.append(inst, value=val, res=res)
state.push(res)
op_UNARY_NEGATIVE = _unaryop
op_UNARY_POSITIVE = _unaryop
op_UNARY_NOT = _unaryop
op_UNARY_INVERT = _unaryop
def _binaryop(self, state, inst):
rhs = state.pop()
lhs = state.pop()
res = state.make_temp()
state.append(inst, lhs=lhs, rhs=rhs, res=res)
state.push(res)
op_COMPARE_OP = _binaryop
op_INPLACE_ADD = _binaryop
op_INPLACE_SUBTRACT = _binaryop
op_INPLACE_MULTIPLY = _binaryop
op_INPLACE_DIVIDE = _binaryop
op_INPLACE_TRUE_DIVIDE = _binaryop
op_INPLACE_FLOOR_DIVIDE = _binaryop
op_INPLACE_MODULO = _binaryop
op_INPLACE_POWER = _binaryop
op_INPLACE_MATRIX_MULTIPLY = _binaryop
op_INPLACE_LSHIFT = _binaryop
op_INPLACE_RSHIFT = _binaryop
op_INPLACE_AND = _binaryop
op_INPLACE_OR = _binaryop
op_INPLACE_XOR = _binaryop
op_BINARY_ADD = _binaryop
op_BINARY_SUBTRACT = _binaryop
op_BINARY_MULTIPLY = _binaryop
op_BINARY_DIVIDE = _binaryop
op_BINARY_TRUE_DIVIDE = _binaryop
op_BINARY_FLOOR_DIVIDE = _binaryop
op_BINARY_MODULO = _binaryop
op_BINARY_POWER = _binaryop
op_BINARY_MATRIX_MULTIPLY = _binaryop
op_BINARY_LSHIFT = _binaryop
op_BINARY_RSHIFT = _binaryop
op_BINARY_AND = _binaryop
op_BINARY_OR = _binaryop
op_BINARY_XOR = _binaryop
    def op_MAKE_FUNCTION(self, state, inst, MAKE_CLOSURE=False):
        """Create a function object from the qualname and code at TOS.

        The oparg encodes which optional values (defaults, kw-only
        defaults, annotations, closure) were pushed; the encoding
        changed in Python 3.6.
        """
        name = state.pop()
        code = state.pop()
        closure = annotations = kwdefaults = defaults = None
        if PYVERSION < (3, 6):
            # Pre-3.6: counts are packed into bit-fields of the oparg.
            num_posdefaults = inst.arg & 0xFF
            num_kwdefaults = (inst.arg >> 8) & 0xFF
            num_annotations = (inst.arg >> 16) & 0x7FFF
            if MAKE_CLOSURE:
                closure = state.pop()
            if num_annotations > 0:
                annotations = state.pop()
            if num_kwdefaults > 0:
                kwdefaults = []
                for i in range(num_kwdefaults):
                    v = state.pop()
                    k = state.pop()
                    kwdefaults.append((k, v))
                kwdefaults = tuple(kwdefaults)
            if num_posdefaults:
                defaults = []
                for i in range(num_posdefaults):
                    defaults.append(state.pop())
                defaults = tuple(defaults)
        else:
            # 3.6+: the oparg is a flag bitmask.
            if inst.arg & 0x8:
                closure = state.pop()
            if inst.arg & 0x4:
                annotations = state.pop()
            if inst.arg & 0x2:
                kwdefaults = state.pop()
            if inst.arg & 0x1:
                defaults = state.pop()
        res = state.make_temp()
        state.append(
            inst,
            name=name,
            code=code,
            closure=closure,
            annotations=annotations,
            kwdefaults=kwdefaults,
            defaults=defaults,
            res=res,
        )
        state.push(res)
    def op_MAKE_CLOSURE(self, state, inst):
        # Pre-3.6 variant of MAKE_FUNCTION that also pops a closure tuple.
        self.op_MAKE_FUNCTION(state, inst, MAKE_CLOSURE=True)
def op_LOAD_CLOSURE(self, state, inst):
res = state.make_temp()
state.append(inst, res=res)
state.push(res)
    # NOTE: Please see notes in `interpreter.py` surrounding the implementation
    # of LOAD_METHOD and CALL_METHOD.

    def op_LOAD_METHOD(self, state, inst):
        # Treated identically to an attribute load.
        self.op_LOAD_ATTR(state, inst)
    def op_CALL_METHOD(self, state, inst):
        # Treated identically to a plain function call.
        self.op_CALL_FUNCTION(state, inst)
@total_ordering
class State(object):
    """State of the trace
    """

    def __init__(self, bytecode, pc, nstack, blockstack):
        """
        Parameters
        ----------
        bytecode : numba.bytecode.ByteCode
            function bytecode
        pc : int
            program counter
        nstack : int
            stackdepth at entry
        blockstack : Sequence[Dict]
            A sequence of dictionary denoting entries on the blockstack.
        """
        self._bytecode = bytecode
        self._pc_initial = pc
        self._pc = pc
        self._nstack_initial = nstack
        self._stack = []
        self._blockstack_initial = tuple(blockstack)
        self._blockstack = list(blockstack)
        self._temp_registers = []
        self._insts = []
        self._outedges = []
        self._terminated = False
        self._phis = {}
        self._outgoing_phis = UniqueDict()
        self._used_regs = set()
        # Seed the stack with one PHI placeholder per incoming stack slot.
        for i in range(nstack):
            phi = self.make_temp("phi")
            self._phis[phi] = i
            self.push(phi)

    def __repr__(self):
        return "State(pc_initial={} nstack_initial={})".format(
            self._pc_initial, self._nstack_initial
        )

    def get_identity(self):
        # (starting pc, entry stack depth) uniquely identifies a State.
        return (self._pc_initial, self._nstack_initial)

    def __hash__(self):
        return hash(self.get_identity())

    def __lt__(self, other):
        return self.get_identity() < other.get_identity()

    def __eq__(self, other):
        return self.get_identity() == other.get_identity()

    @property
    def pc_initial(self):
        """The starting bytecode offset of this State.
        The PC given to the constructor.
        """
        return self._pc_initial

    @property
    def instructions(self):
        """The list of instructions information as a 2-tuple of
        ``(pc : int, register_map : Dict)``
        """
        return self._insts

    @property
    def outgoing_edges(self):
        """The list of outgoing edges.

        Returns
        -------
        edges : List[State]
        """
        return self._outedges

    @property
    def outgoing_phis(self):
        """The dictionary of outgoing phi nodes.

        The keys are the name of the PHI nodes.
        The values are the outgoing states.
        """
        return self._outgoing_phis

    @property
    def blockstack_initial(self):
        """A copy of the initial state of the blockstack
        """
        return self._blockstack_initial

    @property
    def stack_depth(self):
        """The current size of the stack

        Returns
        -------
        res : int
        """
        return len(self._stack)

    def find_initial_try_block(self):
        """Find the initial *try* block.
        """
        # Scan from the innermost entry outward.
        for blk in reversed(self._blockstack_initial):
            if blk['kind'] == BlockKind('TRY'):
                return blk

    def has_terminated(self):
        return self._terminated

    def get_inst(self):
        # Current instruction at the program counter.
        return self._bytecode[self._pc]

    def advance_pc(self):
        inst = self.get_inst()
        self._pc = inst.next

    def make_temp(self, prefix=""):
        # Temp names embed the pc and (without a prefix) the opcode name
        # so they are unique and self-describing.
        if not prefix:
            name = "${prefix}{offset}{opname}.{tempct}".format(
                prefix=prefix,
                offset=self._pc,
                opname=self.get_inst().opname.lower(),
                tempct=len(self._temp_registers),
            )
        else:
            name = "${prefix}{offset}.{tempct}".format(
                prefix=prefix,
                offset=self._pc,
                tempct=len(self._temp_registers),
            )
        self._temp_registers.append(name)
        return name

    def append(self, inst, **kwargs):
        """Append new inst"""
        self._insts.append((inst.offset, kwargs))
        self._used_regs |= set(_flatten_inst_regs(kwargs.values()))

    def get_tos(self):
        return self.peek(1)

    def peek(self, k):
        """Return the k'th element on the stack
        """
        return self._stack[-k]

    def push(self, item):
        """Push to stack"""
        self._stack.append(item)

    def pop(self):
        """Pop the stack"""
        return self._stack.pop()

    def push_block(self, synblk):
        """Push a block to blockstack
        """
        assert 'stack_depth' in synblk
        self._blockstack.append(synblk)

    def reset_stack(self, depth):
        """Reset the stack to the given stack depth.
        Returning the popped items.
        """
        self._stack, popped = self._stack[:depth], self._stack[depth:]
        return popped

    def make_block(self, kind, end, reset_stack=True, handler=None):
        """Make a new block
        """
        d = {
            'kind': BlockKind(kind),
            'end': end,
            'entry_stack': len(self._stack),
        }
        # stack_depth records the depth to unwind to on pop_block;
        # None means "do not unwind" (handler blocks keep the stack).
        if reset_stack:
            d['stack_depth'] = len(self._stack)
        else:
            d['stack_depth'] = None
        d['handler'] = handler
        return d

    def pop_block(self):
        """Pop a block and unwind the stack
        """
        b = self._blockstack.pop()
        self.reset_stack(b['stack_depth'])
        return b

    def pop_block_and_above(self, blk):
        """Find *blk* in the blockstack and remove it and all blocks above it
        from the stack.
        """
        idx = self._blockstack.index(blk)
        assert 0 <= idx < len(self._blockstack)
        self._blockstack = self._blockstack[:idx]

    def get_top_block(self, kind):
        """Find the first block that matches *kind*
        """
        kind = BlockKind(kind)
        for bs in reversed(self._blockstack):
            if bs['kind'] == kind:
                return bs

    def has_active_try(self):
        """Returns a boolean indicating if the top-block is a *try* block
        """
        return self.get_top_block('TRY') is not None

    def get_varname(self, inst):
        """Get referenced variable name from the oparg
        """
        return self._bytecode.co_varnames[inst.arg]

    def terminate(self):
        """Mark block as terminated
        """
        self._terminated = True

    def fork(self, pc, npop=0, npush=0, extra_block=None):
        """Fork the state
        """
        # Handle changes on the stack
        stack = list(self._stack)
        if npop:
            assert 0 <= npop <= len(self._stack)
            nstack = len(self._stack) - npop
            stack = stack[:nstack]
        if npush:
            assert 0 <= npush
            for i in range(npush):
                stack.append(self.make_temp())
        # Handle changes on the blockstack
        blockstack = list(self._blockstack)
        if extra_block:
            blockstack.append(extra_block)
        # Record the outgoing edge and end this state.
        self._outedges.append(Edge(
            pc=pc, stack=tuple(stack), npush=npush,
            blockstack=tuple(blockstack),
        ))
        self.terminate()

    def split_new_block(self):
        """Split the state
        """
        self.fork(pc=self._pc)

    def get_outgoing_states(self):
        """Get states for each outgoing edges
        """
        # Should only call once
        assert not self._outgoing_phis
        ret = []
        for edge in self._outedges:
            state = State(bytecode=self._bytecode, pc=edge.pc,
                          nstack=len(edge.stack), blockstack=edge.blockstack)
            ret.append(state)
            # Map outgoing_phis: each PHI of the successor receives the
            # value at the corresponding stack slot of this edge.
            for phi, i in state._phis.items():
                self._outgoing_phis[phi] = edge.stack[i]
        return ret

    def get_outgoing_edgepushed(self):
        """
        Returns
        -------
        Dict[int, int]
            where keys are the PC
            values are the edge-pushed stack values
        """
        return {edge.pc: tuple(edge.stack[-edge.npush:])
                for edge in self._outedges}
Edge = namedtuple("Edge", ["pc", "stack", "blockstack", "npush"])
class AdaptDFA(object):
    """Adapt Flow to the old DFA class expected by Interpreter
    """
    def __init__(self, flow):
        self._flow = flow

    @property
    def infos(self):
        # Per-block information keyed by bytecode offset.
        return self._flow.block_infos
# Immutable per-block snapshot consumed by the Interpreter adaptor.
AdaptBlockInfo = namedtuple(
    "AdaptBlockInfo",
    ["insts", "outgoing_phis", "blockstack", "active_try_block",
     "outgoing_edgepushed"],
)
def adapt_state_infos(state):
    # Freeze a finished State into an AdaptBlockInfo record.
    return AdaptBlockInfo(
        insts=tuple(state.instructions),
        outgoing_phis=state.outgoing_phis,
        blockstack=state.blockstack_initial,
        active_try_block=state.find_initial_try_block(),
        outgoing_edgepushed=state.get_outgoing_edgepushed(),
    )
def _flatten_inst_regs(iterable):
"""Flatten an iterable of registers used in an instruction
"""
for item in iterable:
if isinstance(item, str):
yield item
elif isinstance(item, (tuple, list)):
for x in _flatten_inst_regs(item):
yield x
class AdaptCFA(object):
    """Adapt Flow to the old CFA class expected by Interpreter
    """
    def __init__(self, flow):
        self._flow = flow
        self._blocks = {}
        for offset, blockinfo in flow.block_infos.items():
            self._blocks[offset] = AdaptCFBlock(blockinfo, offset)
        graph = flow.cfgraph
        # Find backbone (computed once; the original computed it twice
        # and discarded the first result).
        backbone = graph.backbone()
        # Filter out in loop blocks (Assuming no other cyclic control blocks)
        # This is to avoid variables defined in loops being considered as
        # function scope.
        inloopblocks = set()
        for b in self.blocks.keys():
            if graph.in_loops(b):
                inloopblocks.add(b)
        self._backbone = backbone - inloopblocks

    @property
    def graph(self):
        # The underlying control-flow graph.
        return self._flow.cfgraph

    @property
    def backbone(self):
        # Blocks dominating the exit, excluding blocks inside loops.
        return self._backbone

    @property
    def blocks(self):
        # Mapping of bytecode offset -> AdaptCFBlock.
        return self._blocks

    def iterliveblocks(self):
        """Yield blocks in ascending offset order."""
        for b in sorted(self.blocks):
            yield self.blocks[b]

    def dump(self):
        """Debug helper: dump the control-flow graph."""
        self._flow.cfgraph.dump()
class AdaptCFBlock(object):
    def __init__(self, blockinfo, offset):
        # offset: starting bytecode offset of this block.
        self.offset = offset
        # body: tuple of instruction offsets, register maps dropped.
        self.body = tuple(i for i, _ in blockinfo.insts)
| sklam/numba | numba/core/byteflow.py | Python | bsd-2-clause | 42,923 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ``lcov`` report into "cobertura" format.
Using the converted report, print a table of the report and
exit with status code 0 if all files have 100% coverage.
"""
import argparse
import sys
import tempfile
import pycobertura
import lcov_cobertura
THRESHOLD = 1.0 # 100%
def report_coverage(lcov_filename):
    """Convert an ``lcov`` report to cobertura XML, print a coverage
    table, and return the number of files below ``THRESHOLD``."""
    with open(lcov_filename, "r") as file_obj:
        contents = file_obj.read()

    # lcov -> cobertura XML conversion.
    cobertura_xml = lcov_cobertura.LcovCobertura(contents).convert()

    with tempfile.NamedTemporaryFile(mode="w+") as xml_file:
        xml_file.write(cobertura_xml)
        xml_file.seek(0)
        cobertura = pycobertura.Cobertura(xml_file.name)
        print(pycobertura.TextReporter(cobertura).generate())

    # The status code will be the number of files under the
    # threshold.
    num_under = sum(
        cobertura.line_rate(source_file) < THRESHOLD
        for source_file in cobertura.files()
    )
    return num_under
def main():
    """Parse CLI arguments, print the coverage report, and exit.

    The process exit status equals the number of files whose coverage
    is below the threshold (0 means all files fully covered).
    """
    parser = argparse.ArgumentParser(
        description="Convert lcov output to cobertura XML and report."
    )
    parser.add_argument(
        "--lcov-filename",
        dest="lcov_filename",
        required=True,
        help="Filename of `lcov` report to be converted.",
    )
    args = parser.parse_args()
    sys.exit(report_coverage(args.lcov_filename))
if __name__ == "__main__":
    # Script entry point.
    main()
| dhermes/bezier | scripts/report_lcov.py | Python | apache-2.0 | 1,983 |
#! /usr/bin/env python
# encoding: utf-8
# DC 2008
# Thomas Nagy 2010 (ita)
import re
from waflib import Utils
from waflib.Tools import fc, fc_config, fc_scan
from waflib.Configure import conf
@conf
def find_ifort(conf):
    """Locate the ``ifort`` compiler, probe its version and record
    ``FC_NAME``."""
    fc_cmd = conf.find_program('ifort', var='FC')
    fc_cmd = conf.cmd_to_list(fc_cmd)
    conf.get_ifort_version(fc_cmd)
    conf.env.FC_NAME = 'IFORT'
@conf
def ifort_modifier_cygwin(conf):
    # Recognized platform, but no configuration support implemented.
    raise NotImplementedError("Ifort on cygwin not yet implemented")
@conf
def ifort_modifier_platform(conf):
    """Dispatch to a platform-specific ifort hook (e.g.
    ``ifort_modifier_cygwin``) when one exists for the target OS."""
    target_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform()
    modifier = getattr(conf, 'ifort_modifier_' + target_os, None)
    if modifier:
        modifier()
@conf
def get_ifort_version(conf, fc):
    """get the compiler version"""
    # Matches e.g. "ifort (IFORT) 12.1 ..." -> 'major'/'minor' groups.
    version_re = re.compile(r"ifort\s*\(IFORT\)\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search
    cmd = fc + ['--version']
    out, err = fc_config.getoutput(conf, cmd, stdin=False)
    # The version banner may arrive on stdout or stderr.
    if out:
        match = version_re(out)
    else:
        match = version_re(err)
    if not match:
        conf.fatal('cannot determine ifort version.')
    k = match.groupdict()
    conf.env['FC_VERSION'] = (k['major'], k['minor'])
def configure(conf):
    # Waf configuration entry point: detect ifort, the archiver and
    # the Fortran flags, then apply any platform-specific tweaks.
    conf.find_ifort()
    conf.find_ar()
    conf.fc_flags()
    conf.ifort_modifier_platform()
| Gnomescroll/Gnomescroll | server/waflib/Tools/ifort.py | Python | gpl-3.0 | 1,244 |
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Components.ActionMap import ActionMap, NumberActionMap
from Components.Sources.List import List
from Components.Sources.StaticText import StaticText
from Components.config import config, configfile, getConfigListEntry
from Components.ConfigList import ConfigList, ConfigListScreen
from Components.MenuList import MenuList
from Tools.LoadPixmap import LoadPixmap
from enigma import eTimer, RT_HALIGN_LEFT, eListboxPythonMultiContent, gFont, getDesktop, eSize, ePoint
from xml.etree import ElementTree
from operator import itemgetter
import os, time
import urllib2
fb = getDesktop(0).size()
if fb.width() > 1024:
    # HD skin: use nearly the full screen width.
    sizeH = fb.width() - 100
    HDSKIN = True
else:
    # sizeH = fb.width() - 50
    # SD skin: fixed width.
    sizeH = 700
    HDSKIN = False
class OscamInfo:
def __init__(self):
pass
TYPE = 0
NAME = 1
PROT = 2
CAID_SRVID = 3
SRVNAME = 4
ECMTIME = 5
IP_PORT = 6
HEAD = { NAME: _("Label"), PROT: _("Protocol"),
CAID_SRVID: "CAID:SrvID", SRVNAME: _("Serv.Name"),
ECMTIME: _("ECM-Time"), IP_PORT: _("IP-Address") }
version = ""
def confPath(self):
search_dirs = [ "/usr", "/var", "/etc" ]
sdirs = " ".join(search_dirs)
cmd = 'find %s -name "oscam.conf"' % sdirs
res = os.popen(cmd).read()
if res == "":
return None
else:
return res.replace("\n", "")
def getUserData(self):
err = ""
self.oscamconf = self.confPath()
self.username = ""
self.password = ""
if self.oscamconf is not None:
data = open(self.oscamconf, "r").readlines()
webif = False
httpuser = httppwd = httpport = False
for i in data:
if "[webif]" in i.lower():
webif = True
elif "httpuser" in i.lower():
httpuser = True
user = i.split("=")[1].strip()
elif "httppwd" in i.lower():
httppwd = True
pwd = i.split("=")[1].strip()
elif "httpport" in i.lower():
httpport = True
port = i.split("=")[1].strip()
self.port = port
if not webif:
err = _("There is no [webif] section in oscam.conf")
elif not httpuser:
err = _("No httpuser defined in oscam.conf")
elif not httppwd:
err = _("No httppwd defined in oscam.conf")
elif not httpport:
err = _("No httpport defined in oscam.conf. This value is required!")
if err != "":
return err
else:
return user, pwd, port
else:
return _("file oscam.conf could not be found")
def openWebIF(self, part = None, reader = None):
if config.oscaminfo.userdatafromconf.value:
self.ip = "127.0.0.1"
udata = self.getUserData()
if isinstance(udata, str):
if "httpuser" in udata:
self.username=""
elif "httppwd" in udata:
self.password = ""
else:
return False, udata
else:
self.port = udata[2]
self.username = udata[0]
self.password = udata[1]
else:
self.ip = ".".join("%d" % d for d in config.oscaminfo.ip.value)
self.port = config.oscaminfo.port.value
self.username = config.oscaminfo.username.value
self.password = config.oscaminfo.password.value
if part is None:
self.url = "http://%s:%s/oscamapi.html?part=status" % ( self.ip, self.port )
else:
self.url = "http://%s:%s/oscamapi.html?part=%s" % (self.ip, self.port, part )
if part is not None and reader is not None:
self.url = "http://%s:%s/oscamapi.html?part=%s&label=%s" % ( self.ip, self.port, part, reader )
print "URL=%s" % self.url
pwman = urllib2.HTTPPasswordMgrWithDefaultRealm()
pwman.add_password( None, self.url, self.username, self.password )
handlers = urllib2.HTTPDigestAuthHandler( pwman )
opener = urllib2.build_opener( urllib2.HTTPHandler, handlers )
urllib2.install_opener( opener )
request = urllib2.Request( self.url )
err = False
try:
data = urllib2.urlopen( request ).read()
# print data
except urllib2.URLError, e:
if hasattr(e, "reason"):
err = str(e.reason)
elif hasattr(e, "code"):
err = str(e.code)
if err is not False:
print "[openWebIF] Fehler: %s" % err
return False, err
else:
return True, data
def readXML(self, typ):
if typ == "l":
self.showLog = True
part = "status&appendlog=1"
else:
self.showLog = False
part = None
result = self.openWebIF(part)
retval = []
tmp = {}
if result[0]:
if not self.showLog:
data = ElementTree.XML(result[1])
# if typ=="version":
# if data.attrib.has_key("version"):
# self.version = data.attrib["version"]
# else:
# self.version = "n/a"
# return self.version
status = data.find("status")
clients = status.findall("client")
for cl in clients:
name = cl.attrib["name"]
proto = cl.attrib["protocol"]
if cl.attrib.has_key("au"):
au = cl.attrib["au"]
else:
au = ""
caid = cl.find("request").attrib["caid"]
srvid = cl.find("request").attrib["srvid"]
if cl.find("request").attrib.has_key("ecmtime"):
ecmtime = cl.find("request").attrib["ecmtime"]
if ecmtime == "0" or ecmtime == "":
ecmtime = "n/a"
else:
ecmtime = str(float(ecmtime) / 1000)[:5]
else:
ecmtime = "not available"
srvname = cl.find("request").text
if srvname is not None:
if ":" in srvname:
srvname_short = srvname.split(":")[1].strip()
else:
srvname_short = srvname
else:
srvname_short = "n/A"
login = cl.find("times").attrib["login"]
online = cl.find("times").attrib["online"]
if proto.lower() == "dvbapi":
ip = ""
else:
ip = cl.find("connection").attrib["ip"]
if ip == "0.0.0.0":
ip = ""
port = cl.find("connection").attrib["port"]
connstatus = cl.find("connection").text
if name != "" and name != "anonymous" and proto != "":
try:
tmp[cl.attrib["type"]].append( (name, proto, "%s:%s" % (caid, srvid), srvname_short, ecmtime, ip, connstatus) )
except KeyError:
tmp[cl.attrib["type"]] = []
tmp[cl.attrib["type"]].append( (name, proto, "%s:%s" % (caid, srvid), srvname_short, ecmtime, ip, connstatus) )
else:
if "<![CDATA" not in result[1]:
tmp = result[1].replace("<log>", "<log><![CDATA[").replace("</log>", "]]></log>")
else:
tmp = result[1]
data = ElementTree.XML(tmp)
log = data.find("log")
logtext = log.text
if typ == "s":
if tmp.has_key("r"):
for i in tmp["r"]:
retval.append(i)
if tmp.has_key("p"):
for i in tmp["p"]:
retval.append(i)
elif typ == "c":
if tmp.has_key("c"):
for i in tmp["c"]:
retval.append(i)
elif typ == "l":
tmp = logtext.split("\n")
retval = []
for i in tmp:
tmp2 = i.split(" ")
if len(tmp2) > 2:
del tmp2[2]
txt = ""
for j in tmp2:
txt += "%s " % j.strip()
retval.append( txt )
return retval
else:
return result[1]
def getVersion(self):
xmldata = self.openWebIF()
if xmldata[0]:
data = ElementTree.XML(xmldata[1])
if data.attrib.has_key("version"):
self.version = data.attrib["version"]
else:
self.version = "n/a"
return self.version
else:
self.version = "n/a"
return self.version
def getTotalCards(self, reader):
xmldata = self.openWebIF(part = "entitlement", reader = reader)
if xmldata[0]:
xmld = ElementTree.XML(xmldata[1])
cards = xmld.find("reader").find("cardlist")
cardTotal = cards.attrib["totalcards"]
return cardTotal
else:
return None
def getReaders(self, spec = None):
xmldata = self.openWebIF()
readers = []
if xmldata[0]:
data = ElementTree.XML(xmldata[1])
status = data.find("status")
clients = status.findall("client")
for cl in clients:
if cl.attrib.has_key("type"):
if cl.attrib["type"] == "p" or cl.attrib["type"] == "r":
if spec is not None:
proto = cl.attrib["protocol"]
if spec in proto:
name = cl.attrib["name"]
cards = self.getTotalCards(name)
readers.append( ( "%s ( %s Cards )" % (name, cards), name) )
else:
if cl.attrib["name"] != "" and cl.attrib["name"] != "" and cl.attrib["protocol"] != "":
readers.append( (cl.attrib["name"], cl.attrib["name"]) ) # return tuple for later use in Choicebox
return readers
else:
return None
def getClients(self):
xmldata = self.openWebIF()
clientnames = []
if xmldata[0]:
data = ElementTree.XML(xmldata[1])
status = data.find("status")
clients = status.findall("client")
for cl in clients:
if cl.attrib.has_key("type"):
if cl.attrib["type"] == "c":
readers.append( (cl.attrib["name"], cl.attrib["name"]) ) # return tuple for later use in Choicebox
return clientnames
else:
return None
def getECMInfo(self, ecminfo):
result = []
if os.path.exists(ecminfo):
data = open(ecminfo, "r").readlines()
for i in data:
if "caid" in i:
result.append( ("CAID", i.split(":")[1].strip()) )
elif "pid" in i:
result.append( ("PID", i.split(":")[1].strip()) )
elif "prov" in i:
result.append( (_("Provider"), i.split(":")[1].strip()) )
elif "reader" in i:
result.append( ("Reader", i.split(":")[1].strip()) )
elif "from" in i:
result.append( (_("Address"), i.split(":")[1].strip()) )
elif "protocol" in i:
result.append( (_("Protocol"), i.split(":")[1].strip()) )
elif "hops" in i:
result.append( ("Hops", i.split(":")[1].strip()) )
elif "ecm time" in i:
result.append( (_("ECM Time"), i.split(":")[1].strip()) )
return result
else:
return "%s not found" % self.ecminfo
class oscMenuList(MenuList):
    # MenuList preloaded with the fonts shared by all Oscam Info screens.
    def __init__(self, list, itemH = 25):
        MenuList.__init__(self, list, False, eListboxPythonMultiContent)
        self.l.setItemHeight(itemH)
        self.l.setFont(0, gFont("Regular", 18))
        self.l.setFont(1, gFont("Regular", 16))
        self.clientFont = gFont("Regular", 14)
        self.l.setFont(2, self.clientFont)
        self.l.setFont(3, gFont("Regular", 12))
        # Larger fonts used by HD / 1080 skins.
        self.l.setFont(4, gFont("Regular", 28))
        self.l.setFont(5, gFont("Regular", 28))
        self.clientFont1080 = gFont("Regular", 24)
        self.l.setFont(6, self.clientFont1080)
        self.l.setFont(7, gFont("Regular", 24))
class OscamInfoMenu(Screen):
def __init__(self, session):
self.session = session
self.menu = [ _("Show /tmp/ecm.info"), _("Show Clients"), _("Show Readers/Proxies"), _("Show Log"), _("Card infos (CCcam-Reader)"), _("ECM Statistics"), _("Setup") ]
Screen.__init__(self, session)
self.osc = OscamInfo()
self["mainmenu"] = oscMenuList([])
self["actions"] = NumberActionMap(["OkCancelActions", "InputActions", "ColorActions"],
{
"ok": self.ok,
"cancel": self.exit,
"red": self.red,
"green": self.green,
"yellow": self.yellow,
"blue": self.blue,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal,
"up": self.up,
"down": self.down
}, -1)
self.onLayoutFinish.append(self.showMenu)
def ok(self):
selected = self["mainmenu"].getSelectedIndex()
self.goEntry(selected)
def cancel(self):
self.close()
def exit(self):
self.close()
def keyNumberGlobal(self, num):
if num == 0:
numkey = 10
else:
numkey = num
if numkey < len(self.menu) - 3:
self["mainmenu"].moveToIndex(numkey + 3)
self.goEntry(numkey + 3)
def red(self):
self["mainmenu"].moveToIndex(0)
self.goEntry(0)
def green(self):
self["mainmenu"].moveToIndex(1)
self.goEntry(1)
def yellow(self):
self["mainmenu"].moveToIndex(2)
self.goEntry(2)
def blue(self):
self["mainmenu"].moveToIndex(3)
self.goEntry(3)
def up(self):
pass
def down(self):
pass
def goEntry(self, entry):
if entry == 0:
if os.path.exists("/tmp/ecm.info"):
self.session.open(oscECMInfo)
else:
pass
elif entry == 1:
if config.oscaminfo.userdatafromconf.value:
if self.osc.confPath() is None:
config.oscaminfo.userdatafromconf.value = False
config.oscaminfo.userdatafromconf.save()
self.session.openWithCallback(self.ErrMsgCallback, MessageBox, _("File oscam.conf not found.\nPlease enter username/password manually."), MessageBox.TYPE_ERROR)
else:
self.session.open(oscInfo, "c")
else:
self.session.open(oscInfo, "c")
elif entry == 2:
if config.oscaminfo.userdatafromconf.value:
if self.osc.confPath() is None:
config.oscaminfo.userdatafromconf.value = False
config.oscaminfo.userdatafromconf.save()
self.session.openWithCallback(self.ErrMsgCallback, MessageBox, _("File oscam.conf not found.\nPlease enter username/password manually."), MessageBox.TYPE_ERROR)
else:
self.session.open(oscInfo, "s")
else:
self.session.open(oscInfo, "s")
elif entry == 3:
if config.oscaminfo.userdatafromconf.value:
if self.osc.confPath() is None:
config.oscaminfo.userdatafromconf.value = False
config.oscaminfo.userdatafromconf.save()
self.session.openWithCallback(self.ErrMsgCallback, MessageBox, _("File oscam.conf not found.\nPlease enter username/password manually."), MessageBox.TYPE_ERROR)
else:
self.session.open(oscInfo, "l")
else:
self.session.open(oscInfo, "l")
elif entry == 4:
osc = OscamInfo()
reader = osc.getReaders("cccam") # get list of available CCcam-Readers
if isinstance(reader, list):
if len(reader) == 1:
self.session.open(oscEntitlements, reader[0][1])
else:
self.callbackmode = "cccam"
self.session.openWithCallback(self.chooseReaderCallback, ChoiceBox, title = _("Please choose CCcam-Reader"), list=reader)
elif entry == 5:
osc = OscamInfo()
reader = osc.getReaders()
if reader is not None:
reader.append( ("All", "all") )
if isinstance(reader, list):
if len(reader) == 1:
self.session.open(oscReaderStats, reader[0][1])
else:
self.callbackmode = "readers"
self.session.openWithCallback(self.chooseReaderCallback, ChoiceBox, title = _("Please choose reader"), list=reader)
elif entry == 6:
self.session.open(OscamInfoConfigScreen)
def chooseReaderCallback(self, retval):
print retval
if retval is not None:
if self.callbackmode == "cccam":
self.session.open(oscEntitlements, retval[1])
else:
self.session.open(oscReaderStats, retval[1])
def ErrMsgCallback(self, retval):
print retval
self.session.open(OscamInfoConfigScreen)
	def buildMenu(self, mlist):
		"""Build MultiContent entries for the main menu.

		Entries starting with "--" are rendered as a divider line plus
		label; all other entries are plain text.  Each row also gets the
		matching numeric/colour key pixmap from *keys*.  Layout values
		differ for 1080p (width 1920) vs. SD skins.
		"""
		screenwidth = getDesktop(0).size().width()
		keys = ["red", "green", "yellow", "blue", "1", "2", "3", "4", "5", "6", "7", "8", "9", "0", ""]
		menuentries = []
		y = 0
		for x in mlist:
			res = [ x ]
			if x.startswith("--"):
				# Divider entry: horizontal line pixmap + label (without "--").
				png = LoadPixmap("/usr/share/enigma2/skin_default/div-h.png")
				if png is not None:
					if screenwidth and screenwidth == 1920:
						res.append((eListboxPythonMultiContent.TYPE_PIXMAP, 10,3,360, 4, png))
						res.append((eListboxPythonMultiContent.TYPE_TEXT, 85, 7, 900, 35, 4, RT_HALIGN_LEFT, x[2:]))
					else:
						res.append((eListboxPythonMultiContent.TYPE_PIXMAP, 10,0,360, 2, png))
						res.append((eListboxPythonMultiContent.TYPE_TEXT, 45, 3, 800, 25, 0, RT_HALIGN_LEFT, x[2:]))
					png2 = LoadPixmap("/usr/share/enigma2/skin_default/buttons/key_" + keys[y] + ".png")
					if png2 is not None:
						if screenwidth and screenwidth == 1920:
							res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 10, 3, 53, 38, png2))
						else:
							res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 5, 3, 35, 25, png2))
			else:
				# Regular entry: plain text plus key pixmap.
				if screenwidth and screenwidth == 1920:
					res.append((eListboxPythonMultiContent.TYPE_TEXT, 85, 7, 900, 35, 4, RT_HALIGN_LEFT, x))
				else:
					res.append((eListboxPythonMultiContent.TYPE_TEXT, 45, 00, 800, 25, 0, RT_HALIGN_LEFT, x))
				png2 = LoadPixmap("/usr/share/enigma2/skin_default/buttons/key_" + keys[y] + ".png")
				if png2 is not None:
					if screenwidth and screenwidth == 1920:
						res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHABLEND, 10, 3, 53, 38, png2))
					else:
						res.append((eListboxPythonMultiContent.TYPE_PIXMAP_ALPHATEST, 5, 0, 35, 25, png2))
			menuentries.append(res)
			# Stay on the last ("") key once the key list is exhausted.
			if y < len(keys) - 1:
				y += 1
		return menuentries
	def showMenu(self):
		"""Fill the main-menu listbox from self.menu and select the first row."""
		entr = self.buildMenu(self.menu)
		self.setTitle(_("Oscam Info - Main Menu"))
		self["mainmenu"].l.setList(entr)
		self["mainmenu"].moveToIndex(0)
class oscECMInfo(Screen, OscamInfo):
	"""Screen showing the key/value pairs parsed from /tmp/ecm.info,
	optionally auto-refreshing at the configured interval."""
	def __init__(self, session):
		Screen.__init__(self, session)
		self.ecminfo = "/tmp/ecm.info"
		self["output"] = oscMenuList([])
		if config.oscaminfo.autoupdate.value:
			# Re-read ecm.info periodically while the screen is open.
			self.loop = eTimer()
			self.loop.callback.append(self.showData)
			timeout = config.oscaminfo.intervall.value * 1000
			self.loop.start(timeout, False)
		self["actions"] = ActionMap(["OkCancelActions"],
					{
						"ok": self.exit,
						"cancel": self.exit
					}, -1)
		self.onLayoutFinish.append(self.showData)
	def exit(self):
		"""Stop the refresh timer (if running) and close the screen."""
		if config.oscaminfo.autoupdate.value:
			self.loop.stop()
		self.close()
	def buildListEntry(self, listentry):
		"""Return a two-column MultiContent row for one (key, value) pair;
		geometry/font differ for 1080p vs. SD skins."""
		screenwidth = getDesktop(0).size().width()
		if screenwidth and screenwidth == 1920:
			return [
				None,
				(eListboxPythonMultiContent.TYPE_TEXT, 10, 10, 300, 35, 4, RT_HALIGN_LEFT, listentry[0]),
				(eListboxPythonMultiContent.TYPE_TEXT, 300, 10, 300, 35, 4, RT_HALIGN_LEFT, listentry[1])
				]
		else:
			return [
				None,
				(eListboxPythonMultiContent.TYPE_TEXT, 10, 10, 300, 30, 0, RT_HALIGN_LEFT, listentry[0]),
				(eListboxPythonMultiContent.TYPE_TEXT, 300, 10, 300, 30, 0, RT_HALIGN_LEFT, listentry[1])
				]
	def showData(self):
		"""Parse ecm.info (via OscamInfo.getECMInfo) and repaint the list."""
		data = self.getECMInfo(self.ecminfo)
		#print data
		out = []
		y = 0
		for i in data:
			out.append(self.buildListEntry(i))
		self["output"].l.setItemHeight(35)
		self["output"].l.setList(out)
		self["output"].selectionEnabled(True)
class oscInfo(Screen, OscamInfo):
	"""Dynamically sized screen showing Oscam webif data.

	*what* selects the view: "c" = clients, "s" = servers, "l" = log.
	The colour buttons switch between the views; the skin is generated
	in __init__ so the screen height matches the number of entries.
	"""
	def __init__(self, session, what):
		global HDSKIN, sizeH
		self.session = session
		self.what = what
		self.firstrun = True
		# Fetch once up front so the initial height matches the data.
		self.webif_data = self.readXML(typ = self.what)
		entry_count = len( self.webif_data )
		# 25 px per row plus headroom for buttons and heading.
		ysize = (entry_count + 4) * 25
		ypos = 10
		self.sizeLH = sizeH - 20
		self.skin = """<screen position="center,center" size="%d, %d" title="Client Info" >""" % (sizeH, ysize)
		button_width = int(sizeH / 4)
		for k, v in enumerate(["red", "green", "yellow", "blue"]):
			xpos = k * button_width
			self.skin += """<ePixmap name="%s" position="%d,%d" size="35,25" pixmap="/usr/share/enigma2/skin_default/buttons/key_%s.png" zPosition="1" transparent="1" alphatest="on" />""" % (v, xpos, ypos, v)
			self.skin += """<widget source="key_%s" render="Label" position="%d,%d" size="%d,%d" font="Regular;16" zPosition="1" valign="center" transparent="1" />""" % (v, xpos + 40, ypos, button_width, 20)
		self.skin += """<ePixmap name="divh" position="0,37" size="%d,2" pixmap="/usr/share/enigma2/skin_default/div-h.png" transparent="1" alphatest="on" />""" % sizeH
		self.skin += """<widget name="output" position="10,45" size="%d,%d" zPosition="1" scrollbarMode="showOnDemand" />""" % ( self.sizeLH, ysize)
		self.skin += """</screen>"""
		Screen.__init__(self, session)
		self.mlist = oscMenuList([])
		self["output"] = self.mlist
		self.errmsg = ""
		self["key_red"] = StaticText(_("Close"))
		# The button for the currently shown view is blanked out.
		if self.what == "c":
			self["key_green"] = StaticText("")
			self["key_yellow"] = StaticText("Servers")
			self["key_blue"] = StaticText("Log")
		elif self.what == "s":
			self["key_green"] = StaticText("Clients")
			self["key_yellow"] = StaticText("")
			self["key_blue"] = StaticText("Log")
		elif self.what == "l":
			self["key_green"] = StaticText("Clients")
			self["key_yellow"] = StaticText("Servers")
			self["key_blue"] = StaticText("")
		else:
			self["key_green"] = StaticText("Clients")
			self["key_yellow"] = StaticText("Servers")
			self["key_blue"] = StaticText("Log")
		self.fieldSizes = []
		self.fs2 = {}
		if config.oscaminfo.autoupdate.value:
			# Periodic refresh of the current view.
			self.loop = eTimer()
			self.loop.callback.append(self.showData)
			timeout = config.oscaminfo.intervall.value * 1000
			self.loop.start(timeout, False)
		self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
					{
						"ok": self.showData,
						"cancel": self.exit,
						"red": self.exit,
						"green": self.key_green,
						"yellow": self.key_yellow,
						"blue": self.key_blue
					}, -1)
		self.onLayoutFinish.append(self.showData)
	def key_green(self):
		"""Switch to the clients view (no-op if already shown)."""
		if self.what == "c":
			pass
		else:
			self.what = "c"
			self.showData()
	def key_yellow(self):
		"""Switch to the servers view (no-op if already shown)."""
		if self.what == "s":
			pass
		else:
			self.what = "s"
			self.showData()
	def key_blue(self):
		"""Switch to the log view (no-op if already shown)."""
		if self.what == "l":
			pass
		else:
			self.what = "l"
			self.showData()
	def exit(self):
		"""Stop the refresh timer (if running) and close the screen."""
		if config.oscaminfo.autoupdate.value:
			self.loop.stop()
		self.close()
	def buildListEntry(self, listentry, heading = False):
		"""Return a MultiContent row for one client/server entry.

		The last element of *listentry* is the status and is not shown as
		text; it only selects the row colour.  Heading rows get a divider
		pixmap appended.
		"""
		res = [ None ]
		x = 0
		if not HDSKIN:
			self.fieldsize = [ 100, 130, 100, 150, 80, 130 ]
			self.startPos = [ 10, 110, 240, 340, 490, 570 ]
			useFont = 3
		else:
			self.fieldsize = [ 150, 200, 130, 200, 100, 150 ]
			self.startPos = [ 50, 200, 400, 530, 730, 830 ]
			useFont = 1
		if isinstance(self.errmsg, tuple):
			useFont = 0 # overrides previous font-size in case of an error message. (if self.errmsg is a tuple, an error occurred which will be displayed instead of regular results
		if not heading:
			status = listentry[len(listentry)-1]
			colour = "0xffffff"
			# BUGFIX: the original tested `status == "OK" or "CONNECTED" or ...`,
			# which is always true because a non-empty string literal is truthy,
			# so every row started out green.  Compare each value explicitly.
			if status == "OK" or status == "CONNECTED" or status == "CARDOK":
				colour = "0x389416"
			if status == "NEEDINIT" or status == "CARDOK":
				colour = "0xbab329"
			if status == "OFF" or status == "ERROR":
				colour = "0xf23d21"
		else:
			colour = "0xffffff"
		for i in listentry[:-1]:
			xsize = self.fieldsize[x]
			xpos = self.startPos[x]
			res.append( (eListboxPythonMultiContent.TYPE_TEXT, xpos, 0, xsize, 20, useFont, RT_HALIGN_LEFT, i, int(colour, 16)) )
			x += 1
		if heading:
			pos = 19
			res.append( (eListboxPythonMultiContent.TYPE_PIXMAP, 0, pos, self.sizeLH, useFont, LoadPixmap("/usr/share/enigma2/skin_default/div-h.png")))
		return res
	def buildLogListEntry(self, listentry):
		"""Return a single-column MultiContent row for one log line.
		NOTE(review): the `or i is not None` condition looks like it was
		meant to be `and`; left unchanged to preserve behaviour."""
		res = [ None ]
		for i in listentry:
			if i.strip() != "" or i is not None:
				res.append( (eListboxPythonMultiContent.TYPE_TEXT, 5, 0, self.sizeLH,14, 2, RT_HALIGN_LEFT, i) )
		return res
	def calcSizes(self, entries):
		"""Build per-column size lists (one list per column index) from the
		fixed colSize table; assumes at most 6 visible columns."""
		self.fs2 = {}
		colSize = [ 100, 200, 150, 200, 150, 100 ]
		for h in entries:
			for i, j in enumerate(h[:-1]):
				try:
					self.fs2[i].append(colSize[i])
				except KeyError:
					self.fs2[i] = []
					self.fs2[i].append(colSize[i])
		sizes = []
		for i in self.fs2.keys():
			sizes.append(self.fs2[i])
		return sizes
	def changeScreensize(self, new_height, new_width = None):
		"""Resize and re-center the window, resize the list widget and
		refresh the colour-button labels for the current view."""
		if new_width is None:
			new_width = sizeH
		self.instance.resize(eSize(new_width, new_height))
		fb = getDesktop(0).size()
		new_posY = int(( fb.height() / 2 ) - ( new_height / 2 ))
		x = int( ( fb.width() - sizeH ) / 2 )
		self.instance.move(ePoint(x, new_posY))
		self["output"].resize(eSize(self.sizeLH, new_height - 20))
		self["key_red"].setText(_("Close"))
		if self.what == "c":
			self["key_green"].setText("")
			self["key_yellow"].setText("Servers")
			self["key_blue"].setText("Log")
			self["output"].l.setItemHeight(20)
		elif self.what == "s":
			self["key_green"].setText("Clients")
			self["key_yellow"].setText("")
			self["key_blue"].setText("Log")
			self["output"].l.setItemHeight(20)
		elif self.what == "l":
			self["key_green"].setText("Clients")
			self["key_yellow"].setText("Servers")
			self["key_blue"].setText("")
			self["output"].l.setItemHeight(14)
		else:
			self["key_green"].setText("Clients")
			self["key_yellow"].setText("Servers")
			self["key_blue"].setText("Log")
	def showData(self):
		"""Fetch (or reuse) webif data for the current view and repaint.

		If readXML returned a plain string, it is an error message and is
		shown instead of the data; auto-update is stopped in that case.
		"""
		if self.firstrun:
			# Reuse the data already fetched in __init__ for the first paint.
			data = self.webif_data
			self.firstrun = False
		else:
			data = self.readXML(typ = self.what)
		if not isinstance(data,str):
			out = []
			if self.what != "l":
				heading = ( self.HEAD[self.NAME], self.HEAD[self.PROT], self.HEAD[self.CAID_SRVID],
					self.HEAD[self.SRVNAME], self.HEAD[self.ECMTIME], self.HEAD[self.IP_PORT], "")
				outlist = [heading]
				for i in data:
					outlist.append( i )
				self.fieldsize = self.calcSizes(outlist)
				out = [ self.buildListEntry(heading, heading=True)]
				for i in data:
					out.append(self.buildListEntry(i))
			else:
				for i in data:
					if i != "":
						out.append( self.buildLogListEntry( (i,) ))
			ysize = (len(out) + 4 ) * 25
			if self.what == "c":
				self.changeScreensize( ysize )
				self.setTitle("Client Info ( Oscam-Version: %s )" % self.getVersion())
			elif self.what == "s":
				self.changeScreensize( ysize )
				self.setTitle("Server Info( Oscam-Version: %s )" % self.getVersion())
			elif self.what == "l":
				# The log view uses a fixed height.
				self.changeScreensize( 500 )
				self.setTitle("Oscam Log ( Oscam-Version: %s )" % self.getVersion())
			self["output"].l.setList(out)
			self["output"].selectionEnabled(False)
		else:
			# Error string from readXML: display it instead of data.
			self.errmsg = (data,)
			if config.oscaminfo.autoupdate.value:
				self.loop.stop()
			out = []
			self.fieldsize = self.calcSizes( [(data,)] )
			for i in self.errmsg:
				out.append( self.buildListEntry( (i,) ))
			ysize = (len(out) + 4 ) * 25
			self.changeScreensize( ysize )
			self.setTitle(_("Error") + data)
			self["output"].l.setList(out)
			self["output"].selectionEnabled(False)
class oscEntitlements(Screen, OscamInfo):
	"""Screen listing the CCcam entitlements (cards per CAID, hop counts,
	reshare and providers) of one reader, read from the webif XML."""
	global HDSKIN, sizeH
	sizeLH = sizeH - 20
	skin = """<screen position="center,center" size="%s, 400" title="Client Info" >
			<widget source="output" render="Listbox" position="10,10" size="%s,400" scrollbarMode="showOnDemand" >
				<convert type="TemplatedMultiContent">
				{"templates":
					{"default": (55,[
							MultiContentEntryText(pos = (0, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is caid
							MultiContentEntryText(pos = (90, 1), size = (150, 24), font=0, flags = RT_HALIGN_LEFT, text = 1), # index 1 is csystem
							MultiContentEntryText(pos = (250, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 2), # index 2 is hop 1
							MultiContentEntryText(pos = (290, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 3), # index 3 is hop 2
							MultiContentEntryText(pos = (330, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 4), # index 4 is hop 3
							MultiContentEntryText(pos = (370, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 5), # index 5 is hop 4
							MultiContentEntryText(pos = (410, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 6), # index 6 is hop 5
							MultiContentEntryText(pos = (480, 1), size = (70, 24), font=0, flags = RT_HALIGN_LEFT, text = 7), # index 7 is sum of cards for caid
							MultiContentEntryText(pos = (550, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 8), # index 8 is reshare
							MultiContentEntryText(pos = (0, 25), size = (700, 24), font=1, flags = RT_HALIGN_LEFT, text = 9), # index 9 is providers
							]),
					"HD": (55,[
							MultiContentEntryText(pos = (0, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is caid
							MultiContentEntryText(pos = (90, 1), size = (150, 24), font=0, flags = RT_HALIGN_LEFT, text = 1), # index 1 is csystem
							MultiContentEntryText(pos = (250, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 2), # index 2 is hop 1
							MultiContentEntryText(pos = (290, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 3), # index 3 is hop 2
							MultiContentEntryText(pos = (330, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 4), # index 4 is hop 3
							MultiContentEntryText(pos = (370, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 5), # index 5 is hop 4
							MultiContentEntryText(pos = (410, 1), size = (40, 24), font=0, flags = RT_HALIGN_LEFT, text = 6), # index 6 is hop 5
							MultiContentEntryText(pos = (480, 1), size = (70, 24), font=0, flags = RT_HALIGN_LEFT, text = 7), # index 7 is sum of cards for caid
							MultiContentEntryText(pos = (550, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 8), # index 8 is reshare
							MultiContentEntryText(pos = (630, 1), size = (1024, 50), font=1, flags = RT_HALIGN_LEFT, text = 9), # index 9 is providers
							]),
					},
					"fonts": [gFont("Regular", 18),gFont("Regular", 14),gFont("Regular", 24),gFont("Regular", 20)],
					"itemHeight": 56
				}
				</convert>
			</widget>
		</screen>""" % ( sizeH, sizeLH)
	def __init__(self, session, reader):
		global HDSKIN, sizeH
		Screen.__init__(self, session)
		self.mlist = oscMenuList([])
		# Name of the CCcam reader whose entitlements are shown.
		self.cccamreader = reader
		self["output"] = List([ ])
		self["actions"] = ActionMap(["OkCancelActions"],
					{
						"ok": self.showData,
						"cancel": self.exit
					}, -1)
		self.onLayoutFinish.append(self.showData)
	def exit(self):
		self.close()
	def buildList(self, data):
		"""Turn the per-CAID dict built in showData into list rows:
		(caid, system, hop1..hop5, total, reshare, provider text)."""
		caids = data.keys()
		caids.sort()
		outlist = []
		res = [ ("CAID", "System", "1", "2", "3", "4", "5", "Total", "Reshare", "") ]
		for i in caids:
			csum = 0
			ca_id = i
			csystem = data[i]["system"]
			hops = data[i]["hop"]
			csum += sum(hops)
			creshare = data[i]["reshare"]
			prov = data[i]["provider"]
			# SD skins have no room for a second line, so providers get a
			# textual prefix instead of a line break.
			if not HDSKIN:
				providertxt = _("Providers: ")
				linefeed = ""
			else:
				providertxt = ""
				linefeed = "\n"
			for j in prov:
				providertxt += "%s - %s%s" % ( j[0], j[1], linefeed )
			res.append( ( ca_id,
				csystem,
				str(hops[1]),str(hops[2]), str(hops[3]), str(hops[4]), str(hops[5]), str(csum), str(creshare),
				providertxt[:-1]
				) )
			outlist.append(res)
		return res
	def showData(self):
		"""Fetch the entitlement XML for the reader, aggregate cards per
		CAID (hop counts, reshare, providers) and repaint the list."""
		xmldata_for_reader = self.openWebIF(part = "entitlement", reader = self.cccamreader)
		xdata = ElementTree.XML(xmldata_for_reader[1])
		reader = xdata.find("reader")
		if reader.attrib.has_key("hostaddress"):
			hostadr = reader.attrib["hostaddress"]
			host_ok = True
		else:
			# Older webif versions put the host address on each card instead.
			host_ok = False
		cardlist = reader.find("cardlist")
		cardTotal = cardlist.attrib["totalcards"]
		cards = cardlist.findall("card")
		caid = {}
		for i in cards:
			ccaid = i.attrib["caid"]
			csystem = i.attrib["system"]
			creshare = i.attrib["reshare"]
			if not host_ok:
				hostadr = i.find("hostaddress").text
			chop = int(i.attrib["hop"])
			if chop > 5:
				# Everything beyond hop 5 is counted in the last bucket.
				chop = 5
			if caid.has_key(ccaid):
				if caid[ccaid].has_key("hop"):
					caid[ccaid]["hop"][chop] += 1
				else:
					caid[ccaid]["hop"] = [ 0, 0, 0, 0, 0, 0 ]
					caid[ccaid]["hop"][chop] += 1
				caid[ccaid]["reshare"] = creshare
				caid[ccaid]["provider"] = [ ]
				provs = i.find("providers")
				for prov in provs.findall("provider"):
					caid[ccaid]["provider"].append( (prov.attrib["provid"], prov.text) )
				caid[ccaid]["system"] = csystem
			else:
				caid[ccaid] = {}
				if caid[ccaid].has_key("hop"):
					caid[ccaid]["hop"][chop] += 1
				else:
					caid[ccaid]["hop"] = [ 0, 0, 0, 0, 0, 0]
					caid[ccaid]["hop"][chop] += 1
				caid[ccaid]["reshare"] = creshare
				caid[ccaid]["provider"] = [ ]
				provs = i.find("providers")
				for prov in provs.findall("provider"):
					caid[ccaid]["provider"].append( (prov.attrib["provid"], prov.text) )
				caid[ccaid]["system"] = csystem
		result = self.buildList(caid)
		if HDSKIN:
			self["output"].setStyle("HD")
		else:
			self["output"].setStyle("default")
		self["output"].setList(result)
		title = [ _("Reader"), self.cccamreader, _("Cards:"), cardTotal, "Server:", hostadr ]
		self.setTitle( " ".join(title))
class oscReaderStats(Screen, OscamInfo):
	"""Screen showing per-reader ECM statistics (channel, avg/last ECM time,
	status, last request, total count), either for one reader or for all."""
	global HDSKIN, sizeH
	sizeLH = sizeH - 20
	skin = """<screen position="center,center" size="%s, 400" title="Client Info" >
			<widget source="output" render="Listbox" position="10,10" size="%s,400" scrollbarMode="showOnDemand" >
				<convert type="TemplatedMultiContent">
				{"templates":
					{"default": (25,[
							MultiContentEntryText(pos = (0, 1), size = (100, 24), font=0, flags = RT_HALIGN_LEFT, text = 0), # index 0 is caid
							MultiContentEntryText(pos = (100, 1), size = (50, 24), font=0, flags = RT_HALIGN_LEFT, text = 1), # index 1 is csystem
							MultiContentEntryText(pos = (150, 1), size = (150, 24), font=0, flags = RT_HALIGN_LEFT, text = 2), # index 2 is hop 1
							MultiContentEntryText(pos = (300, 1), size = (60, 24), font=0, flags = RT_HALIGN_LEFT, text = 3), # index 3 is hop 2
							MultiContentEntryText(pos = (360, 1), size = (60, 24), font=0, flags = RT_HALIGN_LEFT, text = 4), # index 4 is hop 3
							MultiContentEntryText(pos = (420, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 5), # index 5 is hop 4
							MultiContentEntryText(pos = (510, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 6), # index 6 is hop 5
							MultiContentEntryText(pos = (590, 1), size = (80, 24), font=0, flags = RT_HALIGN_LEFT, text = 7), # index 7 is sum of cards for caid
							]),
					"HD": (25,[
							MultiContentEntryText(pos = (0, 1), size = (200, 24), font=1, flags = RT_HALIGN_LEFT, text = 0), # index 0 is caid
							MultiContentEntryText(pos = (200, 1), size = (70, 24), font=1, flags = RT_HALIGN_LEFT, text = 1), # index 1 is csystem
							MultiContentEntryText(pos = (300, 1), size = (220, 24), font=1, flags = RT_HALIGN_LEFT, text = 2), # index 2 is hop 1
							MultiContentEntryText(pos = (540, 1), size = (80, 24), font=1, flags = RT_HALIGN_LEFT, text = 3), # index 3 is hop 2
							MultiContentEntryText(pos = (630, 1), size = (80, 24), font=1, flags = RT_HALIGN_LEFT, text = 4), # index 4 is hop 3
							MultiContentEntryText(pos = (720, 1), size = (130, 24), font=1, flags = RT_HALIGN_LEFT, text = 5), # index 5 is hop 4
							MultiContentEntryText(pos = (840, 1), size = (130, 24), font=1, flags = RT_HALIGN_LEFT, text = 6), # index 6 is hop 5
							MultiContentEntryText(pos = (970, 1), size = (100, 24), font=1, flags = RT_HALIGN_LEFT, text = 7), # index 7 is sum of cards for caid
							]),
					},
					"fonts": [gFont("Regular", 14),gFont("Regular", 18),gFont("Regular", 24),gFont("Regular", 20)],
					"itemHeight": 26
				}
				</convert>
			</widget>
		</screen>""" % ( sizeH, sizeLH)
	def __init__(self, session, reader):
		global HDSKIN, sizeH
		Screen.__init__(self, session)
		# reader == "all" shows statistics of every configured reader.
		if reader == "all":
			self.allreaders = True
		else:
			self.allreaders = False
		self.reader = reader
		self.mlist = oscMenuList([])
		self["output"] = List([ ])
		self["actions"] = ActionMap(["OkCancelActions"],
					{
						"ok": self.showData,
						"cancel": self.exit
					}, -1)
		self.onLayoutFinish.append(self.showData)
	def exit(self):
		self.close()
	def buildList(self, data):
		"""Identical to oscEntitlements.buildList; not used by showData in
		this class (kept for compatibility)."""
		caids = data.keys()
		caids.sort()
		outlist = []
		res = [ ("CAID", "System", "1", "2", "3", "4", "5", "Total", "Reshare", "") ]
		for i in caids:
			csum = 0
			ca_id = i
			csystem = data[i]["system"]
			hops = data[i]["hop"]
			csum += sum(hops)
			creshare = data[i]["reshare"]
			prov = data[i]["provider"]
			if not HDSKIN:
				providertxt = _("Providers: ")
				linefeed = ""
			else:
				providertxt = ""
				linefeed = "\n"
			for j in prov:
				providertxt += "%s - %s%s" % ( j[0], j[1], linefeed )
			res.append( ( ca_id,
				csystem,
				str(hops[1]),str(hops[2]), str(hops[3]), str(hops[4]), str(hops[5]), str(csum), str(creshare),
				providertxt[:-1]
				) )
			outlist.append(res)
		return res
	def sortData(self, datalist, sort_col, reverse = False):
		"""Sort the result rows by the given column index."""
		return sorted(datalist, key=itemgetter(sort_col), reverse = reverse)
	def showData(self):
		"""Fetch readerstats XML for each reader, collect its ECM rows and
		repaint the list sorted by total ECM count (descending)."""
		readers = self.getReaders()
		result = []
		title2 = ""
		for i in readers:
			xmldata = self.openWebIF(part = "readerstats", reader = i[1])
			# emm_* belong to the commented-out emmstats parsing below.
			emm_wri = emm_ski = emm_blk = emm_err = ""
			if xmldata[0]:
				xdata = ElementTree.XML(xmldata[1])
				rdr = xdata.find("reader")
#				emms = rdr.find("emmstats")
#				if emms.attrib.has_key("totalwritten"):
#					emm_wri = emms.attrib["totalwritten"]
#				if emms.attrib.has_key("totalskipped"):
#					emm_ski = emms.attrib["totalskipped"]
#				if emms.attrib.has_key("totalblocked"):
#					emm_blk = emms.attrib["totalblocked"]
#				if emms.attrib.has_key("totalerror"):
#					emm_err = emms.attrib["totalerror"]
				ecmstat = rdr.find("ecmstats")
				totalecm = ecmstat.attrib["totalecm"]
				ecmcount = ecmstat.attrib["count"]
				lastacc = ecmstat.attrib["lastaccess"]
				ecm = ecmstat.findall("ecm")
				# BUGFIX: ecmcount is an XML attribute *string*; comparing it
				# to the int 0 was always true in Python 2.  Convert first.
				if int(ecmcount) > 0:
					for j in ecm:
						caid = j.attrib["caid"]
						channel = j.attrib["channelname"]
						avgtime = j.attrib["avgtime"]
						lasttime = j.attrib["lasttime"]
						retcode = j.attrib["rc"]
						rcs = j.attrib["rcs"]
						num = j.text
						if rcs == "found":
							# Times are reported in milliseconds; show seconds.
							avg_time = str(float(avgtime) / 1000)[:5]
							last_time = str(float(lasttime) / 1000)[:5]
							if j.attrib.has_key("lastrequest"):
								lastreq = j.attrib["lastrequest"]
								try:
									last_req = lastreq.split("T")[1][:-5]
								except IndexError:
									# Not ISO formatted: assume a unix timestamp.
									last_req = time.strftime("%H:%M:%S",time.localtime(float(lastreq)))
							else:
								last_req = ""
						else:
							avg_time = last_time = last_req = ""
#						if lastreq != "":
#							last_req = lastreq.split("T")[1][:-5]
						if self.allreaders:
							result.append( (i[1], caid, channel, avg_time, last_time, rcs, last_req, int(num)) )
							title2 = _("( All readers)")
						else:
							if i[1] == self.reader:
								result.append( (i[1], caid, channel, avg_time, last_time, rcs, last_req, int(num)) )
								title2 =_("(Show only reader:") + "%s )" % self.reader
		outlist = self.sortData(result, 7, True)
		out = [ ( _("Label"), _("CAID"), _("Channel"), _("ECM avg"), _("ECM last"), _("Status"), _("Last Req."), _("Total") ) ]
		for i in outlist:
			out.append( (i[0], i[1], i[2], i[3], i[4], i[5], i[6], str(i[7])) )
		if HDSKIN:
			self["output"].setStyle("HD")
		else:
			self["output"].setStyle("default")
		self["output"].setList(out)
		title = [ _("Reader Statistics"), title2 ]
		self.setTitle( " ".join(title))
class OscamInfoConfigScreen(Screen, ConfigListScreen):
	"""Setup screen for the OscamInfo plugin (webif credentials, address,
	port and auto-update behaviour)."""
	def __init__(self, session, msg = None):
		Screen.__init__(self, session)
		self.session = session
		# Optional error text shown in the status label.
		if msg is not None:
			self.msg = "Error:\n%s" % msg
		else:
			self.msg = ""
		self.oscamconfig = [ ]
		self["key_red"] = StaticText(_("Cancel"))
		self["key_green"] = StaticText(_("OK"))
		self["status"] = StaticText(self.msg)
		self["config"] = ConfigList(self.oscamconfig)
		self["actions"] = ActionMap(["SetupActions", "ColorActions"],
		{
			"red": self.cancel,
			"green": self.save,
			"save": self.save,
			"cancel": self.cancel,
			"ok": self.save,
		}, -2)
		ConfigListScreen.__init__(self, self.oscamconfig, session = self.session)
		self.createSetup()
		# Rebuild the list when options that hide/show other entries change.
		config.oscaminfo.userdatafromconf.addNotifier(self.elementChanged, initial_call = False)
		config.oscaminfo.autoupdate.addNotifier(self.elementChanged, initial_call = False)
		self.onLayoutFinish.append(self.layoutFinished)
	def elementChanged(self, instance):
		"""Notifier callback: regenerate the config list entries."""
		self.createSetup()
		try:
			self["config"].l.setList(self.oscamconfig)
		except KeyError:
			pass
	def layoutFinished(self):
		self.setTitle(_("Oscam Info - Configuration"))
		self["config"].l.setList(self.oscamconfig)
	def createSetup(self):
		"""(Re)build the list of config entries; manual credentials are only
		shown when not reading user data from oscam.conf, the interval only
		when auto-update is enabled."""
		self.oscamconfig = []
		self.oscamconfig.append(getConfigListEntry(_("Read Userdata from oscam.conf"), config.oscaminfo.userdatafromconf))
		if not config.oscaminfo.userdatafromconf.value:
			self.oscamconfig.append(getConfigListEntry(_("Username (httpuser)"), config.oscaminfo.username))
			self.oscamconfig.append(getConfigListEntry(_("Password (httpwd)"), config.oscaminfo.password))
			self.oscamconfig.append(getConfigListEntry(_("IP-Address"), config.oscaminfo.ip))
			self.oscamconfig.append(getConfigListEntry("Port", config.oscaminfo.port))
		self.oscamconfig.append(getConfigListEntry(_("Automatically update Client/Server View?"), config.oscaminfo.autoupdate))
		if config.oscaminfo.autoupdate.value:
			self.oscamconfig.append(getConfigListEntry(_("Update interval (in seconds)"), config.oscaminfo.intervall))
	def save(self):
		"""Persist all entries and close."""
		for x in self.oscamconfig:
			x[1].save()
		configfile.save()
		self.close()
	def cancel(self):
		"""Revert all entries and close."""
		for x in self.oscamconfig:
			x[1].cancel()
		self.close()
| popazerty/openblackhole-SH4 | lib/python/Screens/OScamInfo.py | Python | gpl-2.0 | 40,341 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Test of the automatic JVM setting logic for OMERO startup.
"""
import pytest
from omero.config import ConfigXml, xml
from omero.install.jvmcfg import adjust_settings
from omero.install.jvmcfg import ManualStrategy
from omero.install.jvmcfg import PercentStrategy
from omero.install.jvmcfg import Settings
from omero.install.jvmcfg import Strategy
from omero.install.jvmcfg import strip_dict
from omero.install.jvmcfg import usage_charts
from omero.util.temp_files import create_path
from path import path
from xml.etree.ElementTree import SubElement
from xml.etree.ElementTree import tostring
from xml.etree.ElementTree import XML
from test.unit.test_config import initial
def write_config(data):
    """Write *data* as <property> elements into a fresh config file.

    Each key/value pair is added to both the __ACTIVE__ and the default
    section of an initial config tree; the pretty-printed XML is written
    to a temporary path, which is returned.
    """
    cfg_path = create_path()
    tree = initial()
    for key, value in data.items():
        for section in tree[0:2]:  # __ACTIVE__ & default
            SubElement(section, "property", name=key, value=value)
    raw = tostring(tree, 'utf-8')
    pretty = xml.dom.minidom.parseString(raw).toprettyxml(" ", "\n", None)
    cfg_path.write_text(pretty)
    return cfg_path
class TestMemoryStrip(object):
    """Tests for jvmcfg.strip_dict prefix/suffix filtering."""

    def test_1(self):
        rv = strip_dict({"a.b": "c"}, prefix="a")
        assert {"b": "c"} == rv

    def test_2(self):
        # Multi-component prefixes are stripped as a whole.
        rv = strip_dict({"a.b.c": "d"}, prefix="a.b")
        assert rv["c"] == "d"

    def test_3(self):
        # Default prefix is omero.jvmcfg; unrelated keys are dropped.
        rv = strip_dict({
            "omero.jvmcfg.foo": "a",
            "something.else": "b"})
        assert rv["foo"] == "a"
        assert "something.else" not in rv

    @pytest.mark.parametrize("input,output", (
        ({"omero.jvmcfg.heap_size.blitz": "1g"}, {"heap_size": "1g"}),
        ))
    def test_4(self, input, output):
        # Round-trip through a real ConfigXml file, then strip by suffix.
        p = write_config(input)
        config = ConfigXml(filename=str(p), env_config="default")
        try:
            m = config.as_map()
            s = strip_dict(m, suffix="blitz")
            assert s == output
        finally:
            config.close()

    def test_5(self):
        rv = strip_dict({
            "omero.jvmcfg.a.blitz": "b",
            }, suffix="blitz")
        assert rv["a"] == "b"
class TestSettings(object):
    """Tests for the Settings value object (explicit values vs. defaults)."""

    def test_initial(self):
        # Built-in fallbacks when nothing is passed.
        s = Settings()
        assert s.perm_gen == "128m"
        assert s.heap_dump == "off"
        assert s.heap_size == "512m"

    def test_explicit(self):
        s = Settings({
            "perm_gen": "xxx",
            "heap_dump": "yyy",
            "heap_size": "zzz",
        })
        assert s.perm_gen == "xxx"
        assert s.heap_dump == "yyy"
        assert s.heap_size == "zzz"

    def test_defaults(self):
        # A second dict supplies defaults when the first is empty.
        s = Settings({}, {
            "perm_gen": "xxx",
            "heap_dump": "yyy",
            "heap_size": "zzz",
        })
        assert s.perm_gen == "xxx"
        assert s.heap_dump == "yyy"
        assert s.heap_size == "zzz"

    def test_both(self):
        # Explicit values win over defaults.
        s = Settings({
            "perm_gen": "aaa",
            "heap_dump": "bbb",
            "heap_size": "ccc",
        }, {
            "perm_gen": "xxx",
            "heap_dump": "yyy",
            "heap_size": "zzz",
        })
        assert s.perm_gen == "aaa"
        assert s.heap_dump == "bbb"
        assert s.heap_size == "ccc"
class TestStrategy(object):
    """Tests for the Strategy hierarchy (manual and percent-based sizing)."""

    def test_no_instantiate(self):
        # Strategy is abstract; constructing it directly must fail.
        with pytest.raises(Exception):
            Strategy("blitz")

    def test_hard_coded(self):
        strategy = ManualStrategy("blitz")
        settings = strategy.get_memory_settings()
        assert settings == [
            "-Xmx512m",
            "-XX:MaxPermSize=128m",
            "-XX:+IgnoreUnrecognizedVMOptions",
        ]

    def test_percent_usage(self):
        # First row of the usage table: 2**15 MB total, 15% heap.
        strategy = PercentStrategy("blitz")
        table = list(strategy.usage_table(15, 16))[0]
        assert table[0] == 2**15
        assert table[1] == 2**15*15/100

    def test_heap_dump_on(self):
        # "on" produces a single flag without a dump path.
        settings = Settings({"heap_dump": "on"})
        strategy = PercentStrategy("blitz", settings)
        hd = strategy.get_heap_dump()
        append = strategy.get_append()
        assert " " not in hd
        assert "HeapDumpPath" not in hd
        assert not append
    def test_heap_dump_tmp(self):
        # "tmp" moves the HeapDumpPath option into the appended args.
        settings = Settings({"heap_dump": "tmp"})
        strategy = PercentStrategy("blitz", settings)
        hd = strategy.get_heap_dump()
        append = strategy.get_append()
        assert " " not in hd
        assert "HeapDumpPath" not in hd
        assert "HeapDumpPath" in "".join(append)
class AdjustFixture(object):
    """One adjust_settings test case: input config, expected output, label."""

    def __init__(self, input, output, name, **kwargs):
        self.input = input
        self.output = output
        self.name = name
        self.kwargs = kwargs

    def validate(self, rv):
        """Assert every expected component/value pair appears in *rv*,
        ignoring the leading Settings element of each value list."""
        for component, expected in self.output.items():
            assert component in rv
            actual = rv[component]
            actual.pop(0)  # settings
            assert expected == actual, "%s.%s: %s <> %s" % (self.name, component,
                                                            expected, actual)
import json

# Load the adjust_settings fixtures shipped next to this module
# (<modulename>.json); use a context manager so the file handle is not
# leaked at import time (the original left it open).
with open(__file__[:-3] + ".json", "r") as f:
    data = json.load(f)

AFS = []
for x in data:
    AFS.append(AdjustFixture(x["input"], x["output"], x["name"]))
def template_xml():
    """Parse and return etc/templates/grid/templates.xml from the tree.

    The file lives six directory levels above this test module.
    """
    root = path(__file__) / ".." / ".." / ".." / ".." / ".." / ".."
    tmpl = (root / "etc" / "templates" / "grid" / "templates.xml").abspath()
    return XML(tmpl.text())
class TestAdjustStrategy(object):
    """Run adjust_settings against every JSON fixture and templates.xml."""

    @pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
    def test_adjust(self, fixture, monkeypatch):
        # Pretend the machine reports 2000 MB free / 4000 MB total.
        monkeypatch.setattr(Strategy, '_system_memory_mb_java',
                            lambda x: (2000, 4000))
        p = write_config(fixture.input)
        xml = template_xml()
        config = ConfigXml(filename=str(p), env_config="default")
        try:
            rv = adjust_settings(config, xml, **fixture.kwargs)
            fixture.validate(rv)
        finally:
            config.close()

    @pytest.mark.parametrize("fixture", AFS, ids=[x.name for x in AFS])
    def test_12527(self, fixture, monkeypatch):
        # Pre-5.1 style templates.xml must be rejected (ticket 12527).
        monkeypatch.setattr(Strategy, '_system_memory_mb_java',
                            lambda x: (2000, 4000))
        p = write_config(fixture.input)
        old_templates = path(__file__).dirname() / "old_templates.xml"
        xml = XML(old_templates.abspath().text())
        config = ConfigXml(filename=str(p), env_config="default")
        with pytest.raises(Exception):
            adjust_settings(config, xml, **fixture.kwargs)
class TestChart(object):
    """Smoke test for the usage chart generation."""

    def test_percent_chart(self):
        try:
            usage_charts("target/charts.png")
        except ImportError:
            # Requires matplotlib, etc
            pass
| simleo/openmicroscopy | components/tools/OmeroPy/test/unit/test_jvmcfg.py | Python | gpl-2.0 | 7,583 |
#!/usr/bin/python
# Author: Anthony Ruhier
class ArtistNotFoundException(Exception):
    """Raised when a requested artist cannot be found."""
    pass
| Anthony25/mpd_muspy | mpd_muspy/exceptions.py | Python | bsd-2-clause | 96 |
# -*- coding: utf-8 -*-
import unittest
import re
from StringIO import StringIO
from django.core.management import call_command
from django.db import models
from django.test import TestCase
from freezegun import freeze_time
from mock import Mock
from .forms import OrderDetailsForm
from .middleware import CartMiddleware
from .models import Cart, Order
import context_processors
class DummyProduct(models.Model):
    """Minimal product stand-in for the cart tests; only a price is needed."""
    # Unit price in whole currency units (non-negative integer).
    price = models.PositiveIntegerField()
def create_cart():
    """Create, persist and return an empty Cart."""
    cart = Cart()
    cart.save()
    return cart
def create_product(price=1):
    """Create, persist and return a DummyProduct with the given price."""
    product = DummyProduct(price=price)
    product.save()
    return product
class CartTest(TestCase):
    """Behavioural tests for the Cart model: adding/removing products,
    quantities, clearing and total price."""

    def test_can_create_cart(self):
        cart = create_cart()
        self.assertIsNotNone(cart)
    def test_cart_is_empty(self):
        cart = create_cart()
        self.assertEqual(0, cart.number_of_items)
    def test_add_product(self):
        cart = create_cart()
        cart.add(create_product())
        self.assertEqual(1, cart.number_of_items)
    def test_add_multiple_products(self):
        cart = create_cart()
        cart.add(create_product())
        cart.add(create_product())
        cart.add(create_product())
        self.assertEqual(3, cart.number_of_items)
    def test_increase_quantity(self):
        # Adding the same product twice increments its quantity, so item
        # count and distinct product count diverge.
        cart = create_cart()
        product_a = create_product()
        product_b = create_product()
        cart.add(product_a)
        cart.add(product_b)
        cart.add(product_a)
        self.assertEqual(3, cart.number_of_items)
        self.assertEqual(2, cart.number_of_products)
    def test_iterate_cart_items(self):
        cart = create_cart()
        cart.add(create_product())
        cart.add(create_product())
        cart.add(create_product())
        for cart_item in cart.items.all():
            self.assertEqual(1, cart_item.product.price)
    def test_can_clear_cart(self):
        cart = create_cart()
        cart.add(create_product())
        cart.add(create_product())
        cart.add(create_product())
        self.assertEqual(3, cart.number_of_items)
        cart.clear()
        self.assertEqual(0, cart.number_of_items)
    def test_total_price(self):
        cart = create_cart()
        cart.add(create_product(100))
        cart.add(create_product(200))
        cart.add(create_product(200))
        self.assertEqual(500, cart.total_price)
    @unittest.skip("Need to mock session")
    def test_get_cart(self):
        # Carts are resolved per request/session: same request yields the
        # same cart, different requests different carts.
        request = Mock()
        request2 = Mock()
        cart = Cart.objects.for_request(request)
        cart_again = Cart.objects.for_request(request)
        cart2 = Cart.objects.for_request(request2)
        self.assertEqual(cart, cart_again)
        self.assertNotEqual(cart, cart2)
    def test_can_remove_from_cart(self):
        cart = create_cart()
        product_a = create_product()
        product_b = create_product()
        cart.add(product_a)
        cart.add(product_b)
        cart.add(product_a)
        self.assertEqual(cart.number_of_items, 3)
        cart.remove(product_a)
        self.assertEqual(cart.number_of_items, 2)
class ContextProcessorTest(TestCase):
    """The ``cart`` context processor must expose the request's cart."""

    def test_context_processor(self):
        fake_request = Mock()
        fake_request.cart = Cart()
        rendered = context_processors.cart(fake_request)
        self.assertEqual(rendered['cart'], fake_request.cart)
class CartMiddlewareTest(TestCase):
    """CartMiddleware must attach a cart to every incoming request."""

    def test_middleware(self):
        fake_request = Mock()
        CartMiddleware().process_request(fake_request)
        self.assertIsNotNone(fake_request.cart)
class OrderDetailsFormTest(TestCase):
    """Validation rules of OrderDetailsForm (age limit, postal code format)."""

    @staticmethod
    def _form(**overrides):
        # Baseline valid payload; individual tests override single fields.
        payload = {'name': 'bob',
                   'email': '',
                   'birthday_year': '1970',
                   'birthday_month': '12',
                   'birthday_day': '24',
                   'street': 'Seasame',
                   'city': 'Wonderland',
                   'postal_code': '12345'}
        payload.update(overrides)
        return OrderDetailsForm(data=payload)

    def test_valid_form(self):
        self.assertTrue(self._form().is_valid())

    def test_too_young(self):
        # A 1997 birth year makes the customer too young to order.
        self.assertFalse(self._form(birthday_year='1997').is_valid())

    def test_postal_code_no_letters(self):
        self.assertFalse(self._form(postal_code='wicked').is_valid())
class OrderTest(TestCase):
    """Order.generate_code must build codes from the customer's name."""

    def test_code_generation(self):
        # A code derived from 'bob' may only contain the uppercased letters
        # of the name.  Bug fix: the previous pattern ``[B|O]`` also
        # accepted a literal '|' ('|' has no alternation meaning inside a
        # character class) and only inspected the first character;
        # ``[BO]+\Z`` validates the whole code.
        code = Order.generate_code('bob', 4)
        self.assertEqual(4, len(code))
        self.assertTrue(re.match(r'[BO]+\Z', code))
class CartCleanupCommandTest(TestCase):
    """The ``cart_cleanup`` management command removes stale carts."""

    def test_no_old_found(self):
        for _ in range(3):
            create_cart()
        self.assertEqual(3, Cart.objects.all().count())
        output = StringIO()
        call_command('cart_cleanup', stdout=output)
        self.assertEqual('Deleted 0 old cart objects\n', output.getvalue())

    def test_two_old_found(self):
        create_cart()
        # Carts created at the frozen (old) timestamp are cleanup targets.
        with freeze_time("2014-03-20 12:00:01"):
            create_cart()
            create_cart()
        output = StringIO()
        call_command('cart_cleanup', stdout=output)
        self.assertEqual('Deleted 2 old cart objects\n', output.getvalue())
        self.assertEqual(1, Cart.objects.all().count())
| eliasson/boutique | boutique/checkout/tests.py | Python | gpl-2.0 | 6,380 |
import re

# Sample containing non-ASCII word characters from several languages.
text = u'Français złoty Österreich'
pattern = r'\w+'

# With re.ASCII, \w matches only [a-zA-Z0-9_]; without the flag, str
# patterns match the full Unicode set of word characters.
ascii_pattern = re.compile(pattern, re.ASCII)
unicode_pattern = re.compile(pattern)

ascii_matches = ascii_pattern.findall(text)
unicode_matches = unicode_pattern.findall(text)

print('Text :', text)
print('Pattern :', pattern)
print('ASCII :', ascii_matches)
print('Unicode :', unicode_matches)
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_text/re_flags_ascii.py | Python | apache-2.0 | 315 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import os
import platform
import sys
import py_utils
def GetOSAndArchForCurrentDesktopPlatform():
  """Return an (os_name, arch) pair for the machine running this code."""
  name = GetOSNameForCurrentDesktopPlatform()
  arch = GetArchForCurrentDesktopPlatform(name)
  return name, arch
def GetOSNameForCurrentDesktopPlatform():
  """Return a short OS name: 'chromeos', 'linux', 'mac', 'win', or the raw
  sys.platform value for anything unrecognised."""
  if py_utils.IsRunningOnCrosDevice():
    return 'chromeos'
  if sys.platform.startswith('linux'):
    return 'linux'
  # Exact-match platforms; unknown values pass through unchanged.
  return {'darwin': 'mac', 'win32': 'win'}.get(sys.platform, sys.platform)
def GetArchForCurrentDesktopPlatform(os_name):
  """Return the machine architecture string (e.g. 'x86_64').

  Chrome OS is unsupported: current tests outside of telemetry don't run
  there, and telemetry derives the arch differently on that platform.
  """
  if os_name == 'chromeos':
    raise NotImplementedError()
  return platform.machine()
def GetChromeApkOsVersion(version_name):
  """Map a Chrome-on-Android version name to an OS version letter.

  Names before 'L' map to 'k', 'L'/'M' map to 'l', and anything after 'M'
  maps to 'n'.  Raises AssertionError if the name does not start with an
  uppercase letter.
  """
  version = version_name[0]
  # Bug fix: the assert message previously contained a bare '%s' that was
  # never interpolated; format the offending name into the message.
  assert version.isupper(), (
      'First character of versions name %s was not an uppercase letter.'
      % version_name)
  if version < 'L':
    return 'k'
  if version > 'M':
    return 'n'
  return 'l'
def ChromeBinariesConfigPath():
  """Absolute, symlink-resolved path of chrome_binaries.json beside this file."""
  here = os.path.dirname(os.path.abspath(__file__))
  return os.path.realpath(os.path.join(here, 'chrome_binaries.json'))
| catapult-project/catapult | common/py_utils/py_utils/dependency_util.py | Python | bsd-3-clause | 1,383 |
# coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **Ftp Client for Retrieving ftp data.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'imajimatika@gmail.com'
__version__ = '0.5.0'
__date__ = '19/07/2012'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import urllib2
import os
from BeautifulSoup import BeautifulSoup
netcdf_url = 'http://bfews.pusair-pu.go.id/Sobek-Floodmaps/'
_download_directory = '/home/sunnii/Documents/inasafe/inasafe_real_flood' \
'/forecasting_data/'
def _read_contents(url):
    """Read contents of the url.

    Auxiliary function to read and return file urls.

    Args:
        * url = URL where the file is published

    Returns:
        * list of filename that can be used directly, e.g. with wget
        after concat it with netcdf_url
    """
    # proxy_handler = urllib2.ProxyHandler({'http': '218.54.201.168:80'})
    # opener = urllib2.build_opener(proxy_handler)
    fid = urllib2.urlopen(url)
    # fid = opener.open(url)
    html = fid.read()
    soup = BeautifulSoup(html)
    # NOTE(review): assumes the server renders its directory index as the
    # first HTML table, with the link cell in column 2 of each row —
    # brittle if the listing layout changes.  TODO confirm.
    soup_table = soup.findAll('table')[0]
    soup_row = soup_table.findAll('tr')
    list_cell = []
    for row in soup_row:
        if len(row.findAll('td')) > 0:
            list_cell.append(row.findAll('td')[1])
    # get cell contains a tag
    list_inner_cells = []
    for cell in list_cell:
        if (len(cell.findAll('a')[0])) > 0:
            list_inner_cells.append(cell.findAll('a')[0])
    # get href
    list_name = []
    for inner_cell in list_inner_cells:
        # if inner_cell.has_key('href'):
        # Anchors without an href attribute are skipped silently.
        try:
            list_name.append(inner_cell['href'])
        except KeyError:
            pass
    return list_name
def list_all_netcdf_files(url=netcdf_url):
    """Public function to get list of files in the server

    Returns the '.nc' entries of the server's directory listing as plain
    (byte) strings.

    :param url: The netcdf file source.
    """
    print 'Listing all netcdf file from %s' % url
    list_all_files = _read_contents(url)
    retval = []
    # NOTE(review): the first 200 directory entries are skipped by this
    # hard-coded offset — presumably to jump past non-data links, but
    # this silently drops files if the listing shrinks.  TODO confirm.
    for my_file in list_all_files[200:]:
        if my_file.endswith('.nc'):
            retval.append(str(my_file))
    return retval
def download_file_url(url, download_directory=_download_directory, name=None):
    """Download file for one file.

    :param url: URL where the file is published
    :param download_directory: The local directory to save the file.
    :param name: Optional parameter to select one file. If omitted, latest
        file will be used.

    :return: Local path of the downloaded (or pre-existing) file as a str,
        or False if the requested name is not on the server or the
        download failed.
    """
    # checking file in url directory
    names = list_all_netcdf_files(url)
    if name is None:
        # Default to the last (latest) entry of the server listing.
        name = names[-1]
        print 'Getting file for latest file, which is %s' % name
    elif name not in names:
        print ('Can not download %s. File is not exist in %s' % (name, url))
        return False
    else:
        print 'Getting file for selected file, which is %s' % name
    local_file_path = os.path.join(download_directory, name)
    # check local file, if exist, don't download
    if os.path.isfile(local_file_path):
        print 'But, file is exist, so use your local file.'
        return str(local_file_path)
    # directory management
    # wget writes into the current directory, so temporarily chdir there.
    cwd = os.getcwd()
    os.chdir(download_directory)
    # download
    # NOTE(review): shells out to wget via os.system; assumes wget is
    # installed and the URL/name come from the trusted listing above.
    cmd = 'wget %s' % (url + name)
    print cmd
    os.system(cmd)
    # make sure the file has been downloaded
    if os.path.isfile(local_file_path):
        print ('File has been downloaded to %s' %
               os.path.join(download_directory, name))
        retval = local_file_path
    else:
        print 'wow, file is not downloaded'
        retval = False
    # Always restore the original working directory before returning.
    os.chdir(cwd)
    return str(retval)
if __name__ == '__main__':
download_file_url(netcdf_url, download_directory=_download_directory)
print 'fin'
| opengeogroep/inasafe | realtime/download_netcdf.py | Python | gpl-3.0 | 4,113 |
import righteous
from ConfigParser import SafeConfigParser
from ..compat import unittest
class RighteousIntegrationTestCase(unittest.TestCase):
    """Base test case wiring righteous up with credentials read from
    righteous.config."""

    def setUp(self):
        parser = SafeConfigParser()
        parser.read('righteous.config')
        if not parser.has_section('auth'):
            raise Exception('Please create a righteous.config file with '
                            'appropriate credentials')

        def section_as_dict(section):
            # Collapse a config section into a plain {option: value} dict.
            return dict(
                (option, parser.get(section, option))
                for option in parser.options(section))

        self.auth = section_as_dict('auth')
        self.server = section_as_dict('server-defaults')
        righteous.init(
            self.auth['username'], self.auth['password'],
            self.auth['account_id'], **self.server)
        self.config = parser
        self.username = self.auth['username']

    def test_login(self):
        self.assertTrue(righteous.login())
| michaeljoseph/righteous | tests/integration/base.py | Python | unlicense | 954 |
#
# IIT Kharagpur - Hall Management System
# System to manage Halls of residences, Warden grant requests, student complaints
# hall worker attendances and salary payments
#
# MIT License
#
"""
@ authors: Madhav Datt, Avikalp Srivastava
"""
import ctypes
import mysql.connector
import time
from mysql.connector import errorcode
from datetime import date
def connect():
    """
    Set initial connection with MySQL server, with hmsuser credentials
    Handle errors through codes
    Function only opens connection, does not close

    Returns a live connection (autocommit enabled) or None after showing
    a Windows message box describing the failure.
    """
    # Sign-in credentials for MySQL server
    # NOTE(review): credentials are hard-coded in source; they should be
    # moved to a config file or environment variables.
    config = {
        'user': 'hmsuser',
        'password': 'hmspasstmp',
        'host': 'localhost',
        'database': 'hmskgp',
    }
    try:
        cnx = mysql.connector.connect(**config)
        cnx.autocommit = True
    except mysql.connector.Error as err:
        # Distinguish the two most common failures for a friendlier message.
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            ctypes.windll.user32.MessageBoxA(0, "Incorrect credentials, please contact your administrator",
                                             "Database Error", 1)
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            ctypes.windll.user32.MessageBoxA(0, "Database does not exist, please contact your administrator",
                                             "Database Error", 1)
        else:
            ctypes.windll.user32.MessageBoxA(0, err, " Please contact your administrator",
                                             "Database Error", 1)
    else:
        return cnx
    return None
def add(table, **param):
    """
    Add tuple to specified database table
    Order of database entries must be followed exactly

    Example:
    add("student", password = "secretword", name = "John", address = "home",
        contact_number = "9876543210", hall_ID = 4, room_no = "B-334",
        mess_charge = 250.5, room_type = "S")

    Returns the auto-generated primary key of the new row, or None for an
    unrecognized table.

    NOTE(review): the INSERT statements below insert fixed placeholder
    values; the keyword arguments are currently only forwarded for some
    tables (and commented out for "student").  Callers appear expected to
    fill real values via update() afterwards — confirm before relying on
    **param here.
    """
    # MySQL statements to add to tables
    # Passed parameters must exactly match attribute order
    # Creates row with default values:
    # string_parameter: 'parameter'
    # numerical_value: 0
    # boolean_value: 'False'
    add_student = ("INSERT INTO student "
                   "(password, name, address, contact_number, hall_ID, room_no, mess_charge, room_type) "
                   "VALUES ('password', 'name', 'address', 'contact', 0, 'room', 0, 'U')")
    add_warden = ("INSERT INTO warden "
                  "(password, name, email, hall_ID, controlling_warden) "
                  "VALUES ('password', 'name', 'email', 0, 'False')")
    add_hall = ("INSERT INTO hall "
                "(name, warden_ID, clerk_ID, mess_manager_ID, status, \
                single_room_count, double_room_count, single_room_occupancy, \
                double_room_occupancy, single_room_rent, double_room_rent, \
                amenities_charge, mess_account, amenities_account, repair_account, \
                salary_account, others_account, rent_account) "
                "VALUES ('name', 0, 0, 0, 'U', 0, 0, 0, 0, 0, 0, 0, 0, \
                0, 0, 0, 0, 0)")
    add_worker = ("INSERT INTO worker "
                  "(password, name, worker_type, monthly_salary, daily_wage, \
                  hall_ID, monthly_attendance) "
                  "VALUES ('password', 'name', 'U', 0, 0, 0, 0)")
    add_complaint = ("INSERT INTO complaint "
                     "(student_ID, action_status, description, action_report) "
                     "VALUES (0, 'U', 'description', 'action report')")
    add_hmc = ("INSERT INTO hmc "
               "(password, payment_is_active) "
               "VALUES ('password', 'False')")
    add_grant_request = ("INSERT INTO grant_request "
                         "(repair_charge, other_charge, salary_charge, hall_ID) "
                         "VALUES (0, 0, 0, 0)")
    cnx = connect()
    cursor = cnx.cursor()
    # Insert new row of data into table
    if table == "student":
        cursor.execute(add_student)  # , param)
    elif table == "warden":
        cursor.execute(add_warden, param)
    elif table == "hall":
        cursor.execute(add_hall, param)
    elif table == "worker":
        cursor.execute(add_worker, param)
    elif table == "complaint":
        cursor.execute(add_complaint, param)
    elif table == "hmc":
        cursor.execute(add_hmc, param)
    elif table == "grant_request":
        cursor.execute(add_grant_request, param)
    else:
        ctypes.windll.user32.MessageBoxA(0, "Table not recognized. Insert failed",
                                         "Database Error", 1)
        return None
    # Commit to database
    cnx.commit()
    # lastrowid is the AUTO_INCREMENT key of the row just inserted.
    primary_key = cursor.lastrowid
    cursor.close()
    cnx.close()
    return primary_key
def update(table, primary_key, field, value):
    """
    Update a single field value in `table` for the row identified by
    `primary_key` (the singleton `hmc` table ignores the key).

    The value is bound through a parameterised query, so arbitrary text
    (quotes, hyphens, semicolons, asterisks...) is stored verbatim; the
    previous character-stripping "sanitisation" corrupted legitimate
    input and offered no protection for the interpolated identifiers.
    Table, field and key-column names are still interpolated directly and
    must therefore come only from trusted, programmer-controlled strings.
    """
    # Map each known table to its primary-key column; None marks the
    # singleton table that is updated without a WHERE clause.
    key_columns = {
        "student": "student_ID",
        "warden": "warden_ID",
        "hall": "hall_ID",
        "worker": "worker_ID",
        "complaint": "complaint_ID",
        "grant_request": "grant_ID",
        "hmc": None,
    }
    if table not in key_columns:
        ctypes.windll.user32.MessageBoxA(0, "Table not recognized. Update failed",
                                         "Database Error", 1)
        return

    cnx = connect()
    cursor = cnx.cursor()
    key_column = key_columns[table]
    if key_column is None:
        cursor.execute("UPDATE {} SET {} = %s".format(table, field), (value,))
    else:
        cursor.execute(
            "UPDATE {} SET {} = %s WHERE {} = %s".format(table, field, key_column),
            (value, primary_key))

    # Commit to database
    cnx.commit()
    cursor.close()
    cnx.close()
def update_attend_date():
    """
    Record today's date as the most recent day on which worker
    attendance was marked.
    """
    today = time.strftime('%Y-%m-%d')
    cnx = connect()
    cursor = cnx.cursor()
    cursor.execute("UPDATE attend_date SET last_attend = '{}'".format(today))
    # Commit to database
    cnx.commit()
    cursor.close()
    cnx.close()
def get_attend_date():
    """
    Return the last date on which worker attendance was marked.
    """
    cnx = connect()
    cursor = cnx.cursor()
    cursor.execute("SELECT {} FROM {}".format("last_attend", "attend_date"))
    # Single-row table: fetch the one stored date.
    row = cursor.fetchone()
    cursor.close()
    cnx.close()
    return row[0]
def get(table, primary_key, field):
    """
    Query database to get field from a particular table using Primary Key value

    Can only get one field at a time; returns the first matching row (a
    tuple), or None for an unrecognized table.
    """
    # Known tables and their primary-key columns.
    key_columns = {
        "student": "student_ID",
        "warden": "warden_ID",
        "hall": "hall_ID",
        "worker": "worker_ID",
        "complaint": "complaint_ID",
        "grant_request": "grant_ID",
    }
    cnx = connect()
    cursor = cnx.cursor()
    if table in key_columns:
        cursor.execute("SELECT {} FROM {} WHERE {} = {}".format(
            field, table, key_columns[table], primary_key))
    elif table == "hmc":
        # Singleton table: no WHERE clause needed.
        cursor.execute("SELECT {} FROM {}".format(field, table))
    else:
        ctypes.windll.user32.MessageBoxA(0, "Table not recognized. Query failed",
                                         "Database Error", 1)
        cursor.close()
        cnx.close()
        return None
    # First row of the result set.
    queried = cursor.fetchall()
    cursor.close()
    cnx.close()
    return queried[0]
def delete(table, primary_key):
    """
    Delete value from table corresponding to Primary Key
    """
    # Known tables and their primary-key columns.
    key_columns = {
        "student": "student_ID",
        "warden": "warden_ID",
        "hall": "hall_ID",
        "worker": "worker_ID",
        "complaint": "complaint_ID",
        "grant_request": "grant_ID",
    }
    cnx = connect()
    cursor = cnx.cursor()
    if table in key_columns:
        cursor.execute("DELETE FROM {} WHERE {} = {}".format(
            table, key_columns[table], primary_key))
    else:
        ctypes.windll.user32.MessageBoxA(0, "Table not recognized. Delete failed",
                                         "Database Error", 1)
    cnx.commit()
    cursor.close()
    cnx.close()
| madhav-datt/kgp-hms | src/database/db_func.py | Python | mit | 9,569 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
from django.core.management.base import BaseCommand
from django.core.management import call_command
from optparse import make_option
from snisi_core.models.Projects import Domain
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Update and/or compile gettext catalogues for every SNISI domain."""

    option_list = BaseCommand.option_list + (
        make_option('--update',
                    help='Update .po files',
                    action='store_true',
                    dest='update'),
        make_option('--compile',
                    help='Compile .po files into .mo files',
                    action='store_true',
                    dest='compile'),
    )

    def handle(self, *args, **options):
        domains = [domain.module_path for domain in Domain.active.all()] + \
            ['snisi_web', 'snisi_core', 'snisi_sms', 'snisi_tools']
        root = os.getcwdu()

        def run_in_each_domain(django_command):
            # Run the given i18n management command inside every domain
            # directory that actually carries a locale/ folder, restoring
            # the working directory after each one.
            for domain in domains:
                if not os.path.exists(os.path.join(domain, 'locale')):
                    continue
                logger.info("..{}".format(domain))
                os.chdir(domain)
                call_command(django_command, locale=["fr"])
                os.chdir(root)
            os.chdir(root)

        if options.get('update'):
            logger.info("Updating PO files…")
            run_in_each_domain("makemessages")

        if options.get('compile'):
            logger.info("Compiling MO files…")
            run_in_each_domain("compilemessages")
| yeleman/snisi | snisi_core/management/commands/i18n.py | Python | mit | 1,910 |
class Solution(object):
    def findMaxForm(self, strs, m, n):
        """
        :type strs: List[str]
        :type m: int
        :type n: int
        :rtype: int

        Size of the largest subset of strs using at most m zeros and n
        ones.  Classic 0/1 knapsack with two capacities: dp[i][j] is the
        best subset size with at most i zeros and j ones.  Iterating the
        capacities downwards ensures each string is counted at most once.
        Replaces the previous recursive memoization (O(len(strs)*m*n)
        memo table) with O(m*n) space; also portable to Python 3 (no
        xrange).
        """
        dp = [[0] * (n + 1) for _ in range(m + 1)]
        for s in strs:
            zeros = s.count('0')
            ones = s.count('1')
            # Shrinking loops: dp still holds "previous item" values.
            for i in range(m, zeros - 1, -1):
                for j in range(n, ones - 1, -1):
                    dp[i][j] = max(dp[i][j], 1 + dp[i - zeros][j - ones])
        return dp[m][n]
print Solution().findMaxForm(["10", "0001", "111001", "1", "0"], 5, 3)
print Solution().findMaxForm(["10", "0", "1"], 1, 1) | xiaonanln/myleetcode-python | src/474. Ones and Zeroes.py | Python | apache-2.0 | 790 |
#===islucyplugin===
# -*- coding: utf-8 -*-
# Lucy's Plugin
# presence_plugin.py
# Initial Copyright © 2002-2005 Mike Mintz <mikemintz@gmail.com>
# Modifications Copyright © 2007 Als <Als@exploit.in>
# Modifications Copyright © 2007 dimichxp <dimichxp@gmail.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
check_pending=[]
def handler_presence_ra_change(prs):
    """Recompute a room member's temporary access level from a presence.

    Sums the numeric weights of the member's MUC role (ROLES) and
    affiliation (AFFILIATIONS) and stores the result with
    change_access_temp().  Members in GLOBACCESS or in the room's
    config-file ACL (ACCBYCONFFILE) are left untouched.
    """
    groupchat = prs.getFrom().getStripped()
    nick = prs.getFrom().getResource()
    jid = get_true_jid(groupchat+'/'+nick)
    item = findPresenceItem(prs)
    if jid in GLOBACCESS:
        # Globally privileged users keep their configured access.
        return
    else:
        if groupchat in ACCBYCONFFILE and jid in ACCBYCONFFILE[groupchat]:
            pass
        else:
            if groupchat in GROUPCHATS and nick in GROUPCHATS[groupchat]:
                if jid != None:
                    role = item['role']
                    aff = item['affiliation']
                    if role in ROLES:
                        accr = ROLES[role]
                        # Flag moderators (and users with level >= 15) so the
                        # occupant record marks them as moderators.
                        if role=='moderator' or user_level(jid,groupchat)>=15:
                            GROUPCHATS[groupchat][nick]['ismoder'] = 1
                        else:
                            GROUPCHATS[groupchat][nick]['ismoder'] = 0
                    else:
                        accr = 0
                    if aff in AFFILIATIONS:
                        acca = AFFILIATIONS[aff]
                    else:
                        acca = 0
                    # Effective temporary access = role weight + affiliation weight.
                    access = accr+acca
                    change_access_temp(groupchat, jid, access)
def handler_presence_nickcommand(prs):
    """Kick occupants whose nickname collides with a command or macro name.

    On a nick change (MUC status code 303) the *new* nick is inspected;
    otherwise the resource of the presence is used.  Only the first
    whitespace-separated token, lower-cased, is compared against the
    registered command and macro names.
    """
    groupchat = prs.getFrom().getStripped()
    if groupchat in GROUPCHATS:
        code = prs.getStatusCode()
        if code == '303':
            nick = prs.getNick()
        else:
            nick = prs.getFrom().getResource()
        nicksource=nick.split()[0].strip().lower()
        if nicksource in (COMMANDS.keys() + MACROS.gmacrolist.keys() + MACROS.macrolist[groupchat].keys()):
            order_kick(groupchat, nick, get_bot_nick(groupchat)+u' :your nickname is invalid here')
def iqkeepalive_and_s2scheck():
    """Send an XMPP ping IQ to every joined room and reschedule itself.

    Each ping's id is appended to the module-level ``check_pending`` list
    so the response handler can recognise answers it asked for.  Re-runs
    every 100 seconds via threading.Timer.
    """
    for gch in GROUPCHATS.keys():
        iq=xmpp.Iq()
        iq = xmpp.Iq('get')
        # Random id, remembered so the answer callback can validate it.
        id = 'p'+str(random.randrange(1, 1000))
        globals()['check_pending'].append(id)
        iq.setID(id)
        iq.addChild('ping', {}, [], 'urn:xmpp:ping')
        iq.setTo(gch+'/'+get_gch_info(gch, 'nick'))
        JCON.SendAndCallForResponse(iq, iqkeepalive_and_s2scheck_answ,{})
    try:
        threading.Timer(100, iqkeepalive_and_s2scheck).start()
    # NOTE(review): RuntimeError presumably occurs when no new thread can
    # be started (e.g. at interpreter shutdown) — swallowed on purpose?
    except RuntimeError:
        pass
def iqkeepalive_and_s2scheck_answ(coze, res):
    """Handle a ping response; schedule a rejoin if the ping errored.

    Ids not present in ``check_pending`` are reported and dropped.  An
    error code other than '405' or no-error triggers a delayed (60 s)
    rejoin of the room with its stored nick and password.
    """
    id = res.getID()
    if id in globals()['check_pending']:
        globals()['check_pending'].remove(id)
    else:
        print 'someone is doing wrong...'
        return
    if res:
        gch,error=res.getFrom().getStripped(),res.getErrorCode()
        # '405' and None are treated as a healthy room — TODO confirm '405'
        # here means the service merely rejects ping, not a real failure.
        if error in ['405',None]:
            pass
        else:
            try:
                threading.Timer(60, join_groupchat,(gch,get_gch_info(gch, 'nick') if get_gch_info(gch, 'nick') else DEFAULT_NICK, get_gch_info(gch, 'passw'))).start()
            except RuntimeError:
                pass
register_presence_handler(handler_presence_ra_change)
register_presence_handler(handler_presence_nickcommand)
register_stage2_init(iqkeepalive_and_s2scheck) | XtremeTeam/Lucy-bot | brain/plugins/presence.py | Python | gpl-2.0 | 3,227 |
'''
Created on 24 Feb 2015
@author: oche
'''
from __future__ import unicode_literals
from __future__ import division
import argparse
import os
import sys
import time
import re
import logging
import json
import numpy
from plotter import makeSubPlot
from os.path import expanduser
from util import validURLMatch, validYoutubeURLMatch
from decimal import *
getcontext().prec = 3
try:
from pymediainfo import MediaInfo
except:
from util.pymediainfo import MediaInfo
# //add youtube-dl to the python path
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)) , "youtube-dl"))
from util import cleanResults
from util import getMean
from youtube_dl import YoutubeDL
import database.vEQ_database as DB
import processmonitor.processMonitor as procmon
from powermonitor.voltcraftmeter import VoltcraftMeter
# TODO: Set logging level from argument
def makeDefaultDBFolder():
    """Ensure ~/vEQ-benchmark exists and return its path.

    Used as the default location for the benchmark's SQLite database.
    Fixed: removed a leftover debug ``print`` of the home directory
    (which was also Python-2-only syntax).
    """
    home = expanduser("~")
    video_download_folder = os.path.join(home, "vEQ-benchmark")
    if not os.path.exists(video_download_folder):
        os.makedirs(video_download_folder)
    return video_download_folder
vlc_verbosity = -1
default_youtube_quality= 'bestvideo'
benchmark_duration = 20#or -1 for length of video
meter = None
default_folder= makeDefaultDBFolder()
default_database = os.path.join( default_folder, "vEQ_db.sqlite")
logging.getLogger().setLevel(logging.DEBUG)
def main(argv=None):
parser = argparse.ArgumentParser(description="vEQ-benchmark: A Benchmarking and Measurement Tool for Video")
parser.add_argument("video" , metavar="VIDEO", help="A local file or URL(Youtube, Vimeo etc.) for the video to be benchmarked")
parser.add_argument("-y", "--youtube-format", metavar="format", dest="youtube_quality", default=default_youtube_quality, help="For Youtube videos, a value that corressponds to the quality level see youtube-dl for details")
parser.add_argument("-m", "--power-meter", metavar="meter", dest="meter", default='voltcraft', help="The meter to use for power measurement TODO: Expand this")
parser.add_argument("-d", "--duration", metavar="Duration", dest="benchmark_duration", default=60, type=int, help="The length of time in seconds for the benchmark to run.")
parser.add_argument("-D", "--Database-location", dest="db_loc", metavar ="location for database file or \'memory\'", help = "A absolute location for storing the database file ")
parser.add_argument("-P", "--plot", dest="to_plot", action='store_true', help="Flag to set if this session should be plotted")
parser.add_argument("-S", "--show", dest="to_show", action='store_true', help="Flag to set if the plot of this should be displayed on the screen after a session is completed")
parser.add_argument("-p", "--player", metavar="player", dest="system_player", default="libvlc", help="The Player to use to playback video - default is VLC MediaPlayer")
parser.add_argument("--hwdecode", dest="hw_decode", action='store_true', help="VLC Specific, turn hardware decoding on")
# TODO: implement dynamic power metering VoltcraftMeter
args = parser.parse_args()
video = args.video
benchmark_duration = args.benchmark_duration
youtube_quality =args.youtube_quality
db_loc = args.db_loc
to_show = args.to_show
to_plot = args.to_plot
m = args.meter
system_player = args.system_player
hw_decode = args.hw_decode
video_title = None
video_data = None
video_codec = None
video_height = None
video_width = None
file_size = None
video_url = None
online_video = False
if db_loc is None:
db_loc = default_database
logging.info("Started VEQ_Benchmark")
#TODO: Extract this from here
implementedPowerMeters = {
"voltcraft": VoltcraftMeter()
}
meter = implementedPowerMeters.get(m,None)
# can inject dependency here i.e power meter or smc or bios or batterty
# meter_type = parser.parse_args().meter
# meter = Meter(meter_type)
if meter is None:
logging.warning("No power monitoring device found")
elif meter.initDevice() is None:
meter = None
logging.warning("No power monitoring device found")
vEQdb = DB.vEQ_database(db_loc)
start_time = time.time()
cpu = procmon.get_processor()
os_info = procmon.get_os()
gpu = procmon.get_gpu()
specs =procmon.get_specs()
values = [start_time,os_info,cpu, gpu,specs]
sys_info_index = vEQdb.insertIntoSysInfoTable(values)
if not validURLMatch(video) and not (os.access(video, os.R_OK)):
print('Error: %s file not readable' % video)
logging.error('Error: %s file not readable' % video)
sys.exit(1)
try:
if not validURLMatch(video):
logging.debug("Found regular video - using MediaInfo to extract details")
video_url = video
video_info = MediaInfo.parse(video)
video_data = video_info.to_json()
for track in video_info.tracks:
if track.track_type == 'Video':
video_title = track.title
video_codec = track.codec
video_height = track.height
video_width = track.width
elif validURLMatch(video):
online_video = True
logging.debug("Found online video: Using youtube-dl to get information")
if validYoutubeURLMatch(video):
logging.debug("Found YouTube video: Using Youtube-dl to get information")
youtube_dl_opts = {
'format' : youtube_quality,
'quiet' : True
}
with YoutubeDL(youtube_dl_opts) as ydl:
try:
def getInfoDictValue(value, infodict):
try:
return infodict.get(value,"N,A")
except:
string = "Couldn't retrieve value " + str(value) +" from YoutubeDL"
logging.error(string)
sys.stderr.write(string)
if value == 'url':
sys.exit(1)
return "N/A"
info_dict = ydl.extract_info(video, download=False)
video = getInfoDictValue("url", info_dict)
video_title = info_dict.get('title',"None")
video_data = str(json.dumps(info_dict))
video_codec = info_dict['format']
video_height = info_dict['height']
video_width = info_dict['width']
file_size = info_dict.get('filesize', "None")
video_url = video
except:
error = sys.exc_info()
logging.error("Unexpected error while retrieve details using Youtube-DL: " + str(error))
video_codec, video_height, video_width = "Null",-1,-1
except:
error = sys.exc_info()
logging.error("Could not retrive video format information: " + str(error))
video_data = str(error)
video_codec, video_height, video_width = "Null",-1,-1
"""
values = [timestamp INT, name TEXT, specs TEXT, codec TEXT, width TEXT, height TEXT ]
"""
video_values = [start_time,video,video_data,video_codec,video_width,video_height]
video_index = vEQdb.insertIntoVideoInfoTable(video_values)
#==========================================VLC VIDEO SPECIFIC ===============
if system_player == "libvlc":
from videoInput.veqplayback import VLCPlayback
vlc_args = "--video-title-show --video-title-timeout 10 --sub-source marq --sub-filter marq " + "--verbose " + str(vlc_verbosity)
if hw_decode:
vlc_args = vlc_args + "--avcodec-hw=any"
vEQPlayback = VLCPlayback(video,vEQdb,vlc_args,meter)
logging.debug("Starting Playback with VLC")
vEQPlayback.startPlayback(benchmark_duration)
else:
# use subprocess to start video player and montioring!
# GenericPlaybackObject.startPlayback(benchmarkduration)
from videoInput.genericPlayback import GenericPlayback
generic_command = "/usr/bin/omxplayer"
generic_command = '/usr/bin/vlc-wrapper --avcodec-hw=any'
generic_command = 'start chrome'
workload = "../gopro.mp4" # pass this from cmd line or something
genericPlayback = GenericPlayback(workload=video,db=vEQdb,cmd=generic_command,meter=meter)
genericPlayback.startPlayback(benchmark_duration)
end_time = time.time()
total_duration = end_time - start_time
powers = vEQdb.getValuesFromPowerTable(start_time, end_time)
cpus = vEQdb.getCPUValuesFromPSTable(start_time, end_time)
memorys = vEQdb.getMemValuesFromPSTable(start_time, end_time)
reads = vEQdb.getValuesFromPSTable("io_bytesread", start_time, end_time)
writes = vEQdb.getValuesFromPSTable("io_byteswrite", start_time, end_time)
net_r = vEQdb.getValuesFromPSTable("net_recv", start_time, end_time)
def getDataRateFromArray(arry):
data_volume = 0
try:
data_volume = arry[-1] - arry[0]
except IndexError:
logging.error("Something went wrong with collecting data from array: " + str(arry.__namespace))
return data_volume
data_transferred = getDataRateFromArray(net_r)
data_read_from_io = getDataRateFromArray(reads)
data_writes_from_io = getDataRateFromArray(writes)
'''
http://stackoverflow.com/questions/4029436/subtracting-the-current-and-previous-item-in-a-list
'''
bitrate = [y - x for x,y in zip(net_r,net_r[1:])]
io_readrate = [y - x for x,y in zip(reads,reads[1:])]
io_writerate = [y - x for x,y in zip(writes,writes[1:])]
p = numpy.array(powers)
c = numpy.array(cpus)
m = numpy.array(memorys)
# get rid of zeros and negatives
p = p[p>0]
c = c[c>0]
m = m[m>0]
mean_power = getMean(p)
mean_cpu = getMean(c)
mean_memory = getMean(m)
mean_gpu = -1
#TODO: IMplement GPU
mean_bandwidth = str(Decimal(data_transferred * 8) / Decimal(1000000* total_duration))
mean_io_read = str(Decimal(data_read_from_io * 8) / Decimal(1048576 * total_duration))
mean_io_write = str(Decimal(data_writes_from_io * 8) / Decimal(1048576 * total_duration))
video_values = [start_time,video,video_data,video_codec,video_width,video_height]
summary_keys = ("video_name" , "video_url", "video_codec", "video_height", "video_width", "mean_power", "mean_cpu", "mean_memory", "mean_gpu" , "mean_bandwidth" ,"data_transferred", "file_size", "sys_info_FK", "video_info_FK")
summary_values = (video_title, video_url , video_codec, video_height, video_width, mean_power, mean_cpu,
mean_memory, mean_gpu , mean_bandwidth ,data_transferred, file_size, sys_info_index, video_index)
summary_dict = dict(zip(summary_keys, summary_values))
# print summary_dict
vEQdb.insertIntoVEQSummaryTable(summary_values)
# write this to a summary file json and a database
print video_title
try:
video_title = s = re.sub(r"[^\w\s]", '', video_title)
except:
video_title = video
print "============================================="
print "vEQ-Summary"
print "============================================="
print "Video Name: " + str(video_title)
if online_video:
print "Video URL: " + video
print "Benchmark Duration: " + str(end_time - start_time) + "secs"
print "Video Codec: " + str(video_codec)
print "Width: " + str(video_width)
print "Height: " + str(video_height)
print "Mean Power: " + str(mean_power) + "W"
print "Mean CPU Usage: " + str(mean_cpu) + "%"
print "Mean Memory Usage: " + str(mean_memory) + "%"
print "Video Filesize " + "Not Implemented (TODO)"
if online_video:
print "Mean Bandwidth: "+ mean_bandwidth + "Mbps"
print "Video Data Transferred: " + str(float( data_transferred / (1024**2))) + " MB"
print data_read_from_io
print "Video Data read from I/O: " + str(float( data_read_from_io / (1024**2))) + " MB"
print "Video Data written to I/O: " + str(float( data_writes_from_io / (1024**2))) + " MB"
print "============================================="
print "System Information"
print "============================================="
print "O/S: " + os_info
print "CPU Name: " + cpu
print "GPU Name: " + gpu
print "Memory Info: " + "Not Yet Implemented"
print "Disk Info: " + "Not Yet Implemented"
print "Active NIC Info: " + "Not Yet Implemented"
print "============================================="
# to_plot = True
# to_show = False
# TODO implemtent GPU monitoring
# gpus=None
# plot_title = str(video_codec) + "- (" + str(video_title) + ")"
# if True:
# # if to_plot:
# makeSubPlot(start_time=start_time, figure_title=plot_title, cpus=cpus, memorys=memorys, bitrate=bitrate, powers=powers, gpus=gpus, to_show=to_show)
# to_plot = False
to_show = True
# TODO implemtent GPU monitoring
gpus=None
plot_title = str(video_codec) + "- (" + str(video_title) + ")"
if to_plot:
makeSubPlot(start_time=start_time, figure_title=plot_title, cpus=c, memorys=m, bitrate=bitrate, powers=powers, gpus=gpus, to_show=to_show)
# Entry point. The original line read "if __name__ == '__main__':'['" —
# the stray "'['" literal after the colon made the indented main() call
# below it a syntax error, so the module could not even be parsed.
if __name__ == '__main__':
    main()
| oche-jay/vEQ-benchmark | vEQ_benchmark.py | Python | gpl-2.0 | 13,919 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class SupertoinetteTest(BackendTest):
    """Functional tests for the supertoinette recipe backend."""

    MODULE = 'supertoinette'

    def test_recipe(self):
        """Search for 'fondue' and verify every full recipe is populated."""
        for summary in self.backend.iter_recipes('fondue'):
            recipe = self.backend.get_recipe(summary.id)
            # Each fetched recipe must carry its core fields.
            for value in (recipe.instructions, recipe.ingredients, recipe.title):
                assert value
| laurentb/weboob | modules/supertoinette/test.py | Python | lgpl-3.0 | 1,186 |
from django.core.management.base import BaseCommand
from django_town.oauth2.models import Client
from django_town.core.settings import OAUTH2_SETTINGS
class Command(BaseCommand):
    """Management command: reset every OAuth2 Client's available scope to
    the default scope configured in OAUTH2_SETTINGS."""

    def handle(self, *args, **options):
        # Bulk-update all clients in a single UPDATE query.
        Client.objects.all().update(available_scope=OAUTH2_SETTINGS.default_scope)
        # Debug output of the scope now applied. Parenthesized so the module
        # also parses under Python 3; for a single argument the output is
        # unchanged on Python 2 (the original used the py2-only statement form).
        # NOTE(review): indexing [0] raises IndexError when no Client rows
        # exist — confirm this command only runs with clients registered.
        print(Client.objects.all()[0].available_scope)
        # print Client.objects.create(name=args[0], service=Service.objects.get(name=args[1]))
from distutils.core import setup
# Convert README.md to long description
# Build the PyPI long description: prefer converting the Markdown README to
# reStructuredText via pypandoc, falling back to the raw file contents.
try:
    import pypandoc
    converted = pypandoc.convert('README.md', 'rst')
    # Strip carriage returns from the converted text before uploading.
    long_description = converted.replace("\r", "")
except (ImportError, OSError, IOError):
    # pypandoc (or the pandoc binary) is unavailable — use the raw Markdown.
    print("Pandoc not found. Long_description conversion failure.")
    import io
    with io.open('README.md', encoding="utf-8") as f:
        long_description = f.read()
# Package configuration for the "optimal" metaheuristic optimization library.
setup(
    name='optimal',
    version='0.2.1',
    packages=['optimal', 'optimal.algorithms'],
    # Include example and test files
    package_data={'optimal': ['examples/*.py', 'tests/*.py', 'tests/algorithms/*.py']},
    # Include readme
    data_files=[('', ['README.md'])],
    # Dependencies
    # NOTE(review): install_requires is a setuptools extension; plain
    # distutils.core.setup ignores it — confirm setuptools is intended here.
    install_requires=[
        'numpy'
    ],
    # Metadata
    author='Justin Lovinger',
    license='MIT',
    description="A python metaheuristic optimization library. Currently supports Genetic Algorithms, Gravitational Search, and Cross Entropy.",
    # long_description is built above from README.md (reST when pypandoc is
    # available, raw Markdown otherwise).
    long_description=long_description,
    keywords=['optimization', 'metaheuristic', 'genetic algorithm', 'GA',
              'gravitational search algorithm', 'GSA', 'cross entropy'],
    url='https://github.com/JustinLovinger/optimal',
)
| JustinLovinger/optimal | setup.py | Python | mit | 1,310 |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.audio.services import AudioBackend
class WorkingBackend(AudioBackend):
    """Minimal stub audio backend used by the unit tests.

    Implements the AudioBackend interface with no-op methods so the audio
    service loader can instantiate and exercise it without real playback.
    """

    def __init__(self, config, emitter, name='Working'):
        # Stub: no state is kept; config/emitter/name are ignored.
        # NOTE(review): does not call super().__init__() — confirm
        # AudioBackend requires no base-class initialization.
        pass

    def supported_uris(self):
        """Return the URI schemes this backend claims to handle."""
        return ['file', 'http']

    def add_list(self, tracks):
        """Stub: accept a list of tracks and discard it."""
        pass

    def clear_list(self):
        """Stub: clear the (nonexistent) playlist."""
        pass

    def play(self):
        """Stub: start playback (no-op)."""
        pass

    def stop(self):
        """Stub: stop playback (no-op)."""
        pass
def load_service(base_config, emitter):
    """Instantiate and return the audio backends provided by this module."""
    backend = WorkingBackend(base_config, emitter)
    return [backend]
| aatchison/mycroft-core | test/unittests/audio/services/working/__init__.py | Python | apache-2.0 | 1,071 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple client to communicate with a Presto server.
"""
from httplib import HTTPConnection, HTTPException
import logging
import json
import socket
from urllib2 import HTTPError, urlopen, URLError
from prestoadmin.util.service_util import lookup_port
from prestoadmin.util.exception import InvalidArgumentError
_LOGGER = logging.getLogger(__name__)
URL_TIMEOUT_MS = 5000
NUM_ROWS = 1000
DATA_RESP = "data"
NEXT_URI_RESP = "nextUri"
class PrestoClient:
    """Simple client for submitting SQL statements to a Presto coordinator.

    Result state lives on the instance: ``rows`` accumulates the rows
    fetched so far, ``next_uri`` holds the server-supplied URI of the next
    result page, and ``response_from_server`` the most recently parsed
    JSON response.
    """

    def __init__(self, server, user, port=None):
        """
        Args:
            server: hostname or IP of the Presto coordinator.
            user: user name sent to the server via the X-Presto-User header.
            port: coordinator port; looked up via lookup_port() on first
                query when not supplied.
        """
        self.server = server
        self.user = user
        self.port = port if port else None
        # Per-instance result state. The original declared these as mutable
        # class attributes, so every instance that had not yet called
        # clear_old_results() shared (and mutated) the same dict/list.
        self.response_from_server = {}
        # rows returned by the query
        self.rows = []
        self.next_uri = ""

    def clear_old_results(self):
        """Reset any result state left over from a previous query."""
        self.rows = []
        self.next_uri = ''
        self.response_from_server = {}

    def execute_query(self, sql, schema="default", catalog="hive"):
        """
        Execute a query connecting to Presto server using passed parameters.

        Client sends http POST request to the Presto server, page:
        "/v1/statement". Header information should
        include: X-Presto-Catalog, X-Presto-Schema, X-Presto-User

        Args:
            sql: SQL query to be executed
            schema: Presto schema to be used while executing query
                (default=default)
            catalog: Catalog to be used by the server

        Returns:
            True or False exit status

        Raises:
            InvalidArgumentError: when sql, server, or user is missing.
            ValueError: when the server's response is not valid JSON.
        """
        if not sql:
            raise InvalidArgumentError("SQL query missing")
        if not self.server:
            raise InvalidArgumentError("Server IP missing")
        if not self.user:
            raise InvalidArgumentError("Username missing")
        if not self.port:
            # Lazily resolve the port from the server configuration.
            self.port = lookup_port(self.server)

        self.clear_old_results()

        headers = {"X-Presto-Catalog": catalog,
                   "X-Presto-Schema": schema,
                   "X-Presto-User": self.user}
        answer = ''
        try:
            _LOGGER.info("Connecting to server at: " + self.server +
                         ":" + str(self.port) + " as user " + self.user)
            conn = HTTPConnection(self.server, self.port, False,
                                  URL_TIMEOUT_MS)
            conn.request("POST", "/v1/statement", sql, headers)
            response = conn.getresponse()

            if response.status != 200:
                conn.close()
                _LOGGER.error("Connection error: "
                              + str(response.status) + " " + response.reason)
                return False

            answer = response.read()
            conn.close()

            self.response_from_server = json.loads(answer)
            _LOGGER.info("Query executed successfully")
            return True
        except (HTTPException, socket.error):
            _LOGGER.error("Error connecting to presto server at: " +
                          self.server + ":" + str(self.port))
            return False
        except ValueError as e:
            # json.loads failed: log the raw payload for diagnosis, then
            # re-raise so callers see the parse failure.
            _LOGGER.error('Error connecting to Presto server: ' + e.message +
                          ' error from server: ' + answer)
            raise e

    def get_response_from(self, uri):
        """
        Sends a GET request to the Presto server at the specified next_uri
        and updates the response

        Returns:
            True on success, False when the GET request failed.
        """
        try:
            conn = urlopen(uri, None, URL_TIMEOUT_MS)
            answer = conn.read()
            conn.close()
            self.response_from_server = json.loads(answer)
            _LOGGER.info("GET request successful for uri: " + uri)
            return True
        except (HTTPError, URLError) as e:
            _LOGGER.error("Error opening the presto response uri: " +
                          str(e.reason))
            return False

    def build_results_from_response(self):
        """
        Build result from the response

        The response_from_server may contain up to 3 uri's.
        1. link to fetch the next packet of data ('nextUri')
        2. TODO: information about the query execution ('infoUri')
        3. TODO: cancel the query ('partialCancelUri').
        """
        if NEXT_URI_RESP in self.response_from_server:
            self.next_uri = self.response_from_server[NEXT_URI_RESP]
        else:
            self.next_uri = ""

        if DATA_RESP in self.response_from_server:
            if self.rows:
                self.rows.extend(self.response_from_server[DATA_RESP])
            else:
                self.rows = self.response_from_server[DATA_RESP]

    def get_rows(self, num_of_rows=NUM_ROWS):
        """
        Get the rows returned from the query.

        The client sends GET requests to the server using the 'nextUri'
        from the previous response until the servers response does not
        contain anymore 'nextUri's.  When there is no 'nextUri' the query is
        finished

        Note that this can only be called once and does not page through
        the results.

        Parameters:
            num_of_rows: to be retrieved. 1000 by default
        """
        if num_of_rows == 0:
            return []

        self.build_results_from_response()
        # NOTE(review): if the very first response carries data but no
        # nextUri, the rows collected above are discarded here — confirm
        # the Presto protocol never returns data in the initial response.
        if not self.get_next_uri():
            return []

        while self.get_next_uri():
            if not self.get_response_from(self.get_next_uri()):
                return []
            if (len(self.rows) <= num_of_rows):
                self.build_results_from_response()
        return self.rows

    def get_next_uri(self):
        """Return the URI of the next result page ('' when exhausted)."""
        return self.next_uri
| Svjard/presto-admin | prestoadmin/prestoclient.py | Python | apache-2.0 | 6,210 |
from syscore.objects import missing_data
from dataclasses import dataclass
import datetime as datetime
from copy import copy
import pandas as pd
from sysinit.futures.build_multiple_prices_from_raw_data import (
create_multiple_price_stack_from_raw_data,
)
from sysobjects.dict_of_named_futures_per_contract_prices import (
list_of_price_column_names,
list_of_contract_column_names,
contract_column_names,
setOfNamedContracts,
contract_name_from_column_name,
futuresNamedContractFinalPricesWithContractID,
dictFuturesNamedContractFinalPricesWithContractID,
price_column_names,
price_name,
carry_name,
forward_name,
)
from sysobjects.dict_of_futures_per_contract_prices import (
dictFuturesContractFinalPrices,
)
@dataclass
class singleRowMultiplePrices:
price: float = None
carry: float = None
forward: float = None
price_contract: str = None
carry_contract: str = None
forward_contract: str = None
def concat_with_multiple_prices(self, multiple_prices, timedelta_seconds=1):
new_time_index = multiple_prices.index[-1] + datetime.timedelta(
seconds=timedelta_seconds
)
new_df_row = self.as_aligned_pd_row(new_time_index)
combined_df = pd.concat([pd.DataFrame(multiple_prices), new_df_row], axis=0)
new_multiple_prices = futuresMultiplePrices(combined_df)
return new_multiple_prices
def as_aligned_pd_row(self, time_index: datetime.timedelta) -> pd.DataFrame:
new_dict = {
price_name: self.price,
carry_name: self.carry,
forward_name: self.forward,
contract_name_from_column_name(price_name): self.price_contract,
contract_name_from_column_name(carry_name): self.carry_contract,
contract_name_from_column_name(forward_name): self.forward_contract,
}
new_dict_with_nones_removed = dict(
[(key, value) for key, value in new_dict.items() if value is not None]
)
new_df_row = pd.DataFrame(new_dict_with_nones_removed, index=[time_index])
return new_df_row
class futuresMultiplePrices(pd.DataFrame):
def __init__(self, data):
_check_valid_multiple_price_data(data)
super().__init__(data)
data.index.name = "index" # arctic compatible
@classmethod
## NOT TYPE CHECKING OF ROLL_CALENDAR AS WOULD CAUSE CIRCULAR IMPORT
def create_from_raw_data(
futuresMultiplePrices,
roll_calendar,
dict_of_futures_contract_closing_prices: dictFuturesContractFinalPrices,
):
"""
:param roll_calendar: rollCalendar
:param dict_of_futures_closing_contract_prices: dictFuturesContractPrices with only one column
:return: pd.DataFrame with the 6 columns PRICE, CARRY, FORWARD, PRICE_CONTRACT, CARRY_CONTRACT, FORWARD_CONTRACT
"""
all_price_data_stack = create_multiple_price_stack_from_raw_data(
roll_calendar, dict_of_futures_contract_closing_prices
)
multiple_prices = futuresMultiplePrices(all_price_data_stack)
multiple_prices._is_empty = False
return multiple_prices
@classmethod
def create_empty(futuresMultiplePrices):
"""
Our graceful fail is to return an empty, but valid, dataframe
"""
data = pd.DataFrame(columns=multiple_data_columns)
multiple_prices = futuresMultiplePrices(data)
return multiple_prices
def current_contract_dict(self) -> setOfNamedContracts:
if len(self) == 0:
return missing_data
final_row = self.iloc[-1]
contract_dict = dict(
[(key, final_row[value]) for key, value in contract_column_names.items()]
)
contract_dict = setOfNamedContracts(contract_dict)
return contract_dict
def as_dict(self) -> dictFuturesNamedContractFinalPricesWithContractID:
"""
Split up and transform into dict
:return: dictFuturesContractFinalPricesWithContractID, keys PRICE, FORWARD, CARRY
"""
self_as_dict = {}
for price_column_name in list_of_price_column_names:
contract_column_name = contract_name_from_column_name(price_column_name)
self_as_dict[
price_column_name
] = futuresNamedContractFinalPricesWithContractID(
self[price_column_name],
self[contract_column_name],
price_column_name=price_column_name,
)
self_as_dict = dictFuturesNamedContractFinalPricesWithContractID(self_as_dict)
return self_as_dict
@classmethod
def from_merged_dict(
futuresMultiplePrices,
prices_dict: dictFuturesNamedContractFinalPricesWithContractID,
):
"""
Re-create from dict, eg results of _as_dict
:param prices_dict: dictFuturesContractFinalPricesWithContractID keys PRICE, CARRY, FORWARD
:return: object
"""
multiple_prices_list = []
for key_name in price_column_names.keys():
try:
relevant_data = prices_dict[key_name]
except KeyError:
raise Exception(
"Create multiple prices as dict needs %s as key" % key_name
)
multiple_prices_list.append(relevant_data.as_pd())
multiple_prices_data_frame = pd.concat(multiple_prices_list, axis=1)
# Now it's possible we have more price data for some things than others
# so we forward fill contract_ids; not prices
multiple_prices_data_frame[
list_of_contract_column_names
] = multiple_prices_data_frame[list_of_contract_column_names].ffill()
multiple_prices_object = futuresMultiplePrices(multiple_prices_data_frame)
return multiple_prices_object
def sort_index(self):
df = pd.DataFrame(self)
sorted_df = df.sort_index()
return futuresMultiplePrices(sorted_df)
def update_multiple_prices_with_dict(
self, new_prices_dict: dictFuturesNamedContractFinalPricesWithContractID
):
"""
Given a dict containing prices, forward, carry prices; update existing multiple prices
Because of asynchronicity, we allow overwriting of earlier data
WILL NOT WORK IF A ROLL HAS HAPPENED
:return:
"""
# Add contractid labels to new_prices_dict
# For each key in new_prices dict,
# merge the prices together
# allowing historic updates, but not overwrites of non nan values
# from the updated prices dict
# create a new multiple prices object
current_prices_dict = self.as_dict()
try:
merged_data_as_dict = current_prices_dict.merge_data(new_prices_dict)
except Exception as e:
raise e
merged_data = futuresMultiplePrices.from_merged_dict(merged_data_as_dict)
return merged_data
def drop_trailing_nan(self):
"""
Drop rows where all values are NaN
:return: new futuresMultiplePrices
"""
new_multiple_prices = copy(self)
found_zeros = True
while found_zeros and len(new_multiple_prices) > 0:
last_prices_nan_values = (
new_multiple_prices.isna().iloc[-1][list_of_price_column_names].values
)
if all(last_prices_nan_values):
# drop the last row
new_multiple_prices = new_multiple_prices[:-1]
# Should still be true but let's be careful
found_zeros = True
continue
else:
# Terminate loop
found_zeros = False
# Should terminate anyway let's be sure
break
return futuresMultiplePrices(new_multiple_prices)
def add_one_row_with_time_delta(
self, single_row_prices: singleRowMultiplePrices, timedelta_seconds: int = 1
):
"""
Add a row with a slightly different timestamp
:param single_row_prices: dict of scalars, keys are one or more of 'price','forward','carry','*_contract'
If a contract column is missing, we forward fill
If a price column is missing, we include nans
:return: new multiple prices
"""
new_multiple_prices = single_row_prices.concat_with_multiple_prices(
self, timedelta_seconds=timedelta_seconds
)
new_multiple_prices = new_multiple_prices.forward_fill_contracts()
return new_multiple_prices
def forward_fill_contracts(self):
combined_df = copy(self)
for colname in list_of_contract_column_names:
combined_df[colname] = combined_df[colname].ffill()
return futuresMultiplePrices(combined_df)
def _check_valid_multiple_price_data(data):
data_present = sorted(data.columns)
try:
assert data_present == multiple_data_columns
except AssertionError:
raise Exception("futuresMultiplePrices has to conform to pattern")
multiple_data_columns = sorted(
list_of_price_column_names + list_of_contract_column_names
)
| robcarver17/pysystemtrade | sysobjects/multiple_prices.py | Python | gpl-3.0 | 9,287 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper functions for the layout test analyzer."""
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import fileinput
import os
import pickle
import re
import smtplib
import socket
import sys
import time
import urllib
from bug import Bug
from test_expectations_history import TestExpectationsHistory
class AnalyzerResultMap:
"""A class to deal with joined result produed by the analyzer.
The join is done between layouttests and the test_expectations object
(based on the test expectation file). The instance variable |result_map|
contains the following keys: 'whole','skip','nonskip'. The value of 'whole'
contains information about all layouttests. The value of 'skip' contains
information about skipped layouttests where it has 'SKIP' in its entry in
the test expectation file. The value of 'nonskip' contains all information
about non skipped layout tests, which are in the test expectation file but
not skipped. The information is exactly same as the one parsed by the
analyzer.
"""
def __init__(self, test_info_map):
"""Initialize the AnalyzerResultMap based on test_info_map.
Test_info_map contains all layouttest information. The job here is to
classify them as 'whole', 'skip' or 'nonskip' based on that information.
Args:
test_info_map: the result map of |layouttests.JoinWithTestExpectation|.
The key of the map is test name such as 'media/media-foo.html'.
The value of the map is a map that contains the following keys:
'desc'(description), 'te_info' (test expectation information),
which is a list of test expectation information map. The key of the
test expectation information map is test expectation keywords such
as "SKIP" and other keywords (for full list of keywords, please
refer to |test_expectaions.ALL_TE_KEYWORDS|).
"""
self.result_map = {}
self.result_map['whole'] = {}
self.result_map['skip'] = {}
self.result_map['nonskip'] = {}
if test_info_map:
for (k, value) in test_info_map.iteritems():
self.result_map['whole'][k] = value
if 'te_info' in value:
if any([True for x in value['te_info'] if 'SKIP' in x]):
self.result_map['skip'][k] = value
else:
self.result_map['nonskip'][k] = value
@staticmethod
def GetDiffString(diff_map_element, type_str):
"""Get difference string out of diff map element.
The difference string shows difference between two analyzer results
(for example, a result for now and a result for sometime in the past)
in HTML format (with colors). This is used for generating email messages.
Args:
diff_map_element: An element of the compared map generated by
|CompareResultMaps()|. The element has two lists of test cases. One
is for test names that are in the current result but NOT in the
previous result. The other is for test names that are in the previous
results but NOT in the current result. Please refer to comments in
|CompareResultMaps()| for details.
type_str: a string indicating the test group to which |diff_map_element|
belongs; used for color determination. Must be 'whole', 'skip', or
'nonskip'.
Returns:
a string in HTML format (with colors) to show difference between two
analyzer results.
"""
diff = len(diff_map_element[0]) - len(diff_map_element[1])
if diff == 0:
return 'No Change'
color = ''
if diff > 0 and type_str != 'whole':
color = 'red'
else:
color = 'green'
diff_sign = ''
if diff > 0:
diff_sign = '+'
whole_str = '<font color="%s">%s%d</font>' % (color, diff_sign, diff)
colors = ['red', 'green']
if type_str == 'whole':
# Bug 107773 - when we increase the number of tests,
# the name of the tests are in red, it should be green
# since it is good thing.
colors = ['green', 'red']
str1 = ''
for (name, _) in diff_map_element[0]:
str1 += '<font color="%s">%s,</font>' % (colors[0], name)
str2 = ''
for (name, _) in diff_map_element[1]:
str2 += '<font color="%s">%s,</font>' % (colors[1], name)
if str1 or str2:
whole_str += ':'
if str1:
whole_str += str1
if str2:
whole_str += str2
# Remove the last occurrence of ','.
whole_str = ''.join(whole_str.rsplit(',', 1))
return whole_str
def GetPassingRate(self):
"""Get passing rate.
Returns:
layout test passing rate of this result in percent.
Raises:
ValueEror when the number of tests in test group "whole" is equal
or less than that of "skip".
"""
delta = len(self.result_map['whole'].keys()) - (
len(self.result_map['skip'].keys()))
if delta <= 0:
raise ValueError('The number of tests in test group "whole" is equal or '
'less than that of "skip"')
return 100 - len(self.result_map['nonskip'].keys()) * 100 / delta
def ConvertToCSVText(self, current_time):
"""Convert |self.result_map| into stats and issues text in CSV format.
Both are used as inputs for Google spreadsheet.
Args:
current_time: a string depicting a time in year-month-day-hour
format (e.g., 2011-11-08-16).
Returns:
a tuple of stats and issues_txt
stats: analyzer result in CSV format that shows:
(current_time, the number of tests, the number of skipped tests,
the number of failing tests, passing rate)
For example,
"2011-11-10-15,204,22,12,94"
issues_txt: issues listed in CSV format that shows:
(BUGWK or BUGCR, bug number, the test expectation entry,
the name of the test)
For example,
"BUGWK,71543,TIMEOUT PASS,media/media-element-play-after-eos.html,
BUGCR,97657,IMAGE CPU MAC TIMEOUT PASS,media/audio-repaint.html,"
"""
stats = ','.join([current_time, str(len(self.result_map['whole'].keys())),
str(len(self.result_map['skip'].keys())),
str(len(self.result_map['nonskip'].keys())),
str(self.GetPassingRate())])
issues_txt = ''
for bug_txt, test_info_list in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
matches = re.match(r'(BUG(CR|WK))(\d+)', bug_txt)
bug_suffix = ''
bug_no = ''
if matches:
bug_suffix = matches.group(1)
bug_no = matches.group(3)
issues_txt += bug_suffix + ',' + bug_no + ','
for test_info in test_info_list:
test_name, te_info = test_info
issues_txt += ' '.join(te_info.keys()) + ',' + test_name + ','
issues_txt += '\n'
return stats, issues_txt
def ConvertToString(self, prev_time, diff_map, bug_anno_map,
issue_detail_mode):
"""Convert this result to HTML display for email.
Args:
prev_time: the previous time string that are compared against.
diff_map: the compared map generated by |CompareResultMaps()|.
bug_anno_map: a annotation map where keys are bug names and values are
annotations for the bug.
issue_detail_mode: includes the issue details in the output string if
this is True.
Returns:
a analyzer result string in HTML format.
"""
return_str = ''
if diff_map:
return_str += (
'<b>Statistics (Diff Compared to %s):</b><ul>'
'<li>The number of tests: %d (%s)</li>'
'<li>The number of failing skipped tests: %d (%s)</li>'
'<li>The number of failing non-skipped tests: %d (%s)</li>'
'<li>Passing rate: %d %%</li></ul>') % (
prev_time, len(self.result_map['whole'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['whole'], 'whole'),
len(self.result_map['skip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['skip'], 'skip'),
len(self.result_map['nonskip'].keys()),
AnalyzerResultMap.GetDiffString(diff_map['nonskip'], 'nonskip'),
self.GetPassingRate())
if issue_detail_mode:
return_str += '<b>Current issues about failing non-skipped tests:</b>'
for (bug_txt, test_info_list) in (
self.GetListOfBugsForNonSkippedTests().iteritems()):
if not bug_txt in bug_anno_map:
bug_anno_map[bug_txt] = ''
else:
bug_anno_map[bug_txt] = '(' + bug_anno_map[bug_txt] + ')'
return_str += '<ul>%s %s' % (Bug(bug_txt), bug_anno_map[bug_txt])
for test_info in test_info_list:
(test_name, te_info) = test_info
gpu_link = ''
if 'GPU' in te_info:
gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
dashboard_link = ('http://test-results.appspot.com/dashboards/'
'flakiness_dashboard.html#%stests=%s') % (
gpu_link, test_name)
return_str += '<li><a href="%s">%s</a> (%s) </li>' % (
dashboard_link, test_name, ' '.join(te_info.keys()))
return_str += '</ul>\n'
return return_str
def CompareToOtherResultMap(self, other_result_map):
"""Compare this result map with the other to see if there are any diff.
The comparison is done for layouttests which belong to 'whole', 'skip',
or 'nonskip'.
Args:
other_result_map: another result map to be compared against the result
map of the current object.
Returns:
a map that has 'whole', 'skip' and 'nonskip' as keys.
Please refer to |diff_map| in |SendStatusEmail()|.
"""
comp_result_map = {}
for name in ['whole', 'skip', 'nonskip']:
if name == 'nonskip':
# Look into expectation to get diff only for non-skipped tests.
lookIntoTestExpectationInfo = True
else:
# Otherwise, only test names are compared to get diff.
lookIntoTestExpectationInfo = False
comp_result_map[name] = GetDiffBetweenMaps(
self.result_map[name], other_result_map.result_map[name],
lookIntoTestExpectationInfo)
return comp_result_map
@staticmethod
def Load(file_path):
"""Load the object from |file_path| using pickle library.
Args:
file_path: the string path to the file from which to read the result.
Returns:
a AnalyzerResultMap object read from |file_path|.
"""
file_object = open(file_path)
analyzer_result_map = pickle.load(file_object)
file_object.close()
return analyzer_result_map
def Save(self, file_path):
"""Save the object to |file_path| using pickle library.
Args:
file_path: the string path to the file in which to store the result.
"""
file_object = open(file_path, 'wb')
pickle.dump(self, file_object)
file_object.close()
def GetListOfBugsForNonSkippedTests(self):
"""Get a list of bugs for non-skipped layout tests.
This is used for generating email content.
Returns:
a mapping from bug modifier text (e.g., BUGCR1111) to a test name and
main test information string which excludes comments and bugs.
This is used for grouping test names by bug.
"""
bug_map = {}
for (name, value) in self.result_map['nonskip'].iteritems():
for te_info in value['te_info']:
main_te_info = {}
for k in te_info.keys():
if k != 'Comments' and k != 'Bugs':
main_te_info[k] = True
if 'Bugs' in te_info:
for bug in te_info['Bugs']:
if bug not in bug_map:
bug_map[bug] = []
bug_map[bug].append((name, main_te_info))
return bug_map
def SendStatusEmail(prev_time, analyzer_result_map, diff_map,
bug_anno_map, receiver_email_address, test_group_name,
appended_text_to_email, email_content, rev_str,
email_only_change_mode):
"""Send status email.
Args:
prev_time: the date string such as '2011-10-09-11'. This format has been
used in this analyzer.
analyzer_result_map: current analyzer result.
diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
The values of the map are the result of |GetDiffBetweenMaps()|.
The element has two lists of test cases. One (with index 0) is for
test names that are in the current result but NOT in the previous
result. The other (with index 1) is for test names that are in the
previous results but NOT in the current result.
For example (test expectation information is omitted for
simplicity),
comp_result_map['whole'][0] = ['foo1.html']
comp_result_map['whole'][1] = ['foo2.html']
This means that current result has 'foo1.html' but it is NOT in the
previous result. This also means the previous result has 'foo2.html'
but it is NOT in the current result.
bug_anno_map: bug annotation map where bug name and annotations are
stored.
receiver_email_address: receiver's email address.
test_group_name: string representing the test group name (e.g., 'media').
appended_text_to_email: a text which is appended at the end of the status
email.
email_content: an email content string that will be shown on the dashboard.
rev_str: a revision string that contains revision information that is sent
out in the status email. It is obtained by calling
|GetRevisionString()|.
email_only_change_mode: send email only when there is a change if this is
True. Otherwise, always send email after each run.
"""
if rev_str:
email_content += '<br><b>Revision Information:</b>'
email_content += rev_str
localtime = time.asctime(time.localtime(time.time()))
change_str = ''
if email_only_change_mode:
change_str = 'Status Change '
subject = 'Layout Test Analyzer Result %s(%s): %s' % (change_str,
test_group_name,
localtime)
# TODO(imasaki): remove my name from here.
SendEmail('imasaki@chromium.org', [receiver_email_address],
subject, email_content + appended_text_to_email)
def GetRevisionString(prev_time, current_time, diff_map):
  """Get a string for revision information during the specified time period.
  Args:
    prev_time: the previous time as a floating point number expressed
        in seconds since the epoch, in UTC.
    current_time: the current time as a floating point number expressed
        in seconds since the epoch, in UTC. It is typically obtained by
        time.time() function.
    diff_map: a map that has 'whole', 'skip' and 'nonskip' as keys.
        Please refer to |diff_map| in |SendStatusEmail()|.
  Returns:
    a tuple of strings:
        1) full string containing links, author, date, and line for each
            change in the test expectation file.
        2) shorter string containing only links to the change. Used for
            trend graph annotations.
        3) last revision number for the given test group.
        4) last revision date for the given test group.
  """
  if not diff_map:
    return ('', '', '', '')
  # Collect the test names touched in either direction of the skip/nonskip
  # diffs; this set filters the expectation-file history query below.
  testname_map = {}
  for test_group in ['skip', 'nonskip']:
    for i in range(2):
      for (k, _) in diff_map[test_group][i]:
        testname_map[k] = True
  rev_infos = TestExpectationsHistory.GetDiffBetweenTimes(prev_time,
                                                          current_time,
                                                          testname_map.keys())
  rev_str = ''
  simple_rev_str = ''
  rev = ''
  rev_date = ''
  if rev_infos:
    # Get latest revision number and date.
    rev = rev_infos[-1][1]
    rev_date = rev_infos[-1][3]
    for rev_info in rev_infos:
      # Each entry: (old rev, new rev, author, date, <unused>, changed lines).
      (old_rev, new_rev, author, date, _, target_lines) = rev_info
      # Build a trac.webkit.org diff link for this expectation-file change.
      # NOTE: urllib.unquote is Python-2-only (moved to urllib.parse in py3).
      link = urllib.unquote('http://trac.webkit.org/changeset?new=%d%40trunk'
                            '%2FLayoutTests%2Fplatform%2Fchromium%2F'
                            'test_expectations.txt&old=%d%40trunk%2F'
                            'LayoutTests%2Fplatform%2Fchromium%2F'
                            'test_expectations.txt') % (new_rev, old_rev)
      rev_str += '<ul><a href="%s">%s->%s</a>\n' % (link, old_rev, new_rev)
      # NOTE(review): '=' (not '+=') keeps only the LAST link here even though
      # the trailing comma suggests accumulation -- confirm intended behavior.
      simple_rev_str = '<a href="%s">%s->%s</a>,' % (link, old_rev, new_rev)
      rev_str += '<li>%s</li>\n' % author
      rev_str += '<li>%s</li>\n<ul>' % date
      for line in target_lines:
        # Find *.html pattern (test name) and replace it with the link to
        # flakiness dashboard.
        test_name_pattern = r'(\S+.html)'
        match = re.search(test_name_pattern, line)
        if match:
          test_name = match.group(1)
          gpu_link = ''
          if 'GPU' in line:
            gpu_link = 'group=%40ToT%20GPU%20Mesa%20-%20chromium.org&'
          dashboard_link = ('http://test-results.appspot.com/dashboards/'
                           'flakiness_dashboard.html#%stests=%s') % (
                               gpu_link, test_name)
          line = line.replace(test_name, '<a href="%s">%s</a>' % (
              dashboard_link, test_name))
        # Find bug text and replace it with the link to the bug.
        bug = Bug(line)
        if bug.bug_txt:
          line = '<li>%s</li>\n' % line.replace(bug.bug_txt, str(bug))
        rev_str += line
      rev_str += '</ul></ul>'
  return (rev_str, simple_rev_str, rev, rev_date)
def SendEmail(sender_email_address, receivers_email_addresses, subject,
              message):
  """Send email using localhost's mail server.
  Args:
    sender_email_address: sender's email address.
    receivers_email_addresses: receiver's email addresses.
    subject: subject string.
    message: email message (HTML fragment; wrapped in <html>/<body> below).
  """
  try:
    html_top = """
    <html>
    <head></head>
    <body>
    """
    html_bot = """
    </body>
    </html>
    """
    html = html_top + message + html_bot
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender_email_address
    # NOTE(review): only the first receiver appears in the To: header, while
    # sendmail() below delivers to every address -- confirm this is intended.
    msg['To'] = receivers_email_addresses[0]
    part1 = MIMEText(html, 'html')
    smtp_obj = smtplib.SMTP('localhost')
    msg.attach(part1)
    smtp_obj.sendmail(sender_email_address, receivers_email_addresses,
                      msg.as_string())
    print 'Successfully sent email'
  except smtplib.SMTPException, ex:
    # NOTE(review): every SMTP failure is reported as 'Authentication failed',
    # which is misleading for non-auth SMTP errors.
    print 'Authentication failed:', ex
    print 'Error: unable to send email'
  except (socket.gaierror, socket.error, socket.herror), ex:
    # Socket-level failures: DNS resolution, unreachable server, etc.
    print ex
    print 'Error: unable to send email'
def FindLatestTime(time_list):
  """Pick the most recent timestamp string out of |time_list|.

  Entries must match the 'Year-Month-Day-Hour' convention used by the
  analyzer (e.g., 2011-10-23-23); anything else is silently skipped.

  Args:
    time_list: iterable of candidate timestamp strings (may be empty/None).

  Returns:
    the latest matching string, or None when no entry parses.
  """
  best = None
  for candidate in time_list or []:
    try:
      parsed = datetime.strptime(candidate, '%Y-%m-%d-%H')
    except ValueError:
      continue  # Not a '%Y-%m-%d-%H' string; ignore it.
    if best is None or best < parsed:
      best = parsed
  if best is None:
    return None
  return best.strftime('%Y-%m-%d-%H')
def ReplaceLineInFile(file_path, search_exp, replace_line):
  """Rewrite |file_path| in place, substituting |replace_line| for every
  line that contains |search_exp|.

  Args:
    file_path: the file that is being edited in place.
    search_exp: substring that marks a line for replacement.
    replace_line: the full replacement line (should carry its own newline).
  """
  # fileinput with inplace=1 redirects stdout into the file being iterated.
  for current in fileinput.input(file_path, inplace=1):
    sys.stdout.write(replace_line if search_exp in current else current)
def FindLatestResult(result_dir):
  """Load the most recent analyzer result stored under |result_dir|.

  File names are expected to follow the '%Y-%m-%d-%H' convention; the
  newest one is located via FindLatestTime() and then deserialized.

  Args:
    result_dir: the result directory.

  Returns:
    (file_name, analyzer_result_map) for the newest matching file, or None
    when the directory holds no file matching the pattern.
  """
  newest = FindLatestTime(os.listdir(result_dir))
  if not newest:
    return None
  return (newest, AnalyzerResultMap.Load(os.path.join(result_dir, newest)))
def GetDiffBetweenMaps(map1, map2, lookIntoTestExpectationInfo=False):
  """Get difference between maps.

  Args:
    map1: analyzer result map to be compared.
    map2: analyzer result map to be compared.
    lookIntoTestExpectationInfo: a boolean to indicate whether to compare
        test expectation information in addition to just the test case names.

  Returns:
    a tuple of |name1_list| and |name2_list|. |Name1_list| contains all test
        name and the test expectation information in |map1| but not in |map2|.
        |Name2_list| contains all test name and the test expectation
        information in |map2| but not in |map1|.
  """

  def GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo):
    """A helper function for GetDiffBetweenMaps.

    Returns:
      a list of tuples (name, te_info) that are in |map1| but not in |map2|.
    """
    name_list = []
    # BUG FIX: iteritems() is Python-2-only; items() behaves identically when
    # iterated and also works under Python 3.
    for (name, value1) in map1.items():
      if name in map2:
        if lookIntoTestExpectationInfo and 'te_info' in value1:
          list1 = value1['te_info']
          list2 = map2[name]['te_info']
          # Keep only expectation entries unique to |map1|.
          te_diff = [item for item in list1 if not item in list2]
          if te_diff:
            name_list.append((name, te_diff))
      else:
        name_list.append((name, value1))
    return name_list

  return (GetDiffBetweenMapsHelper(map1, map2, lookIntoTestExpectationInfo),
          GetDiffBetweenMapsHelper(map2, map1, lookIntoTestExpectationInfo))
| ropik/chromium | media/tools/layout_tests/layouttest_analyzer_helpers.py | Python | bsd-3-clause | 22,667 |
# -*-coding: utf-8 -*-
import colander
from . import (
SelectInteger,
ResourceSchema,
BaseSearchForm,
)
from ..models.service import Service
from ..lib.qb.invoices_items import InvoicesItemsQueryBuilder
class _InvoiceItemSchema(ResourceSchema):
    """Colander schema describing one invoice line item."""
    # Foreign key of the billed service, validated against Service rows.
    service_id = colander.SchemaNode(
        SelectInteger(Service),
    )
    # NOTE(review): colander has no built-in Money type -- presumably a
    # project extension registered on the colander module; confirm.
    price = colander.SchemaNode(
        colander.Money()
    )
    vat = colander.SchemaNode(
        colander.Money()
    )
    discount = colander.SchemaNode(
        colander.Money(),
    )
    # Free-text description, capped at 255 characters.
    descr = colander.SchemaNode(
        colander.String(),
        validator=colander.Length(max=255)
    )
class InvoiceItemSearchForm(BaseSearchForm):
    """Search form for invoice items; queries are built by
    InvoicesItemsQueryBuilder."""
    _qb = InvoicesItemsQueryBuilder
| mazvv/travelcrm | travelcrm/forms/invoices_items.py | Python | gpl-3.0 | 723 |
#!/usr/bin/env python
"""Celery Py.
Python wrappers for FarmBot Celery Script JSON nodes.
"""
import os
import json
from functools import wraps
import requests
def farmware_api_url():
    """Return the Farmware API URL matching the FarmBot OS major version.

    FarmBot OS 6+ serves the API under 'api/v1/'; older versions use the
    bare FARMWARE_URL value.
    """
    os_version = os.getenv('FARMBOT_OS_VERSION', '0.0.0')
    root = os.environ['FARMWARE_URL']
    if int(os_version[0]) > 5:
        return root + 'api/v1/'
    return root
def _print_json(function):
    """Decorator: POST the wrapped function's Celery Script when running as
    a Farmware, otherwise hand the JSON-serializable node back."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        """Send Celery Script or return the JSON structure.

        Celery Script is sent via HTTP POST to /celery_script using the url
        in the `FARMWARE_URL` environment variable.
        """
        if 'FARMWARE_URL' not in os.environ:
            # Not running as a Farmware: return the node unchanged.
            return function(*args, **kwargs)
        # Running as a Farmware: send the Celery Script command.
        token = os.environ['FARMWARE_TOKEN']
        headers = {'Authorization': 'bearer {}'.format(token),
                   'content-type': "application/json"}
        payload = json.dumps(function(*args, **kwargs))
        requests.post(farmware_api_url() + 'celery_script',
                      data=payload, headers=headers)
    return wrapper
def log(message, message_type='info', title='plant-detection'):
    """Send a message to the FarmBot log, or print it when not running as
    a Farmware (detected via the FARMWARE_URL environment variable)."""
    if 'FARMWARE_URL' in os.environ:
        tagged = '[{title}] {message}'.format(title=title, message=message)
        send_message(tagged, message_type)
    else:
        print(message)
def _encode_coordinates(x_coord, y_coord, z_coord):
    """Pack x/y/z values into a coordinate args dict."""
    return {'x': x_coord, 'y': y_coord, 'z': z_coord}
def create_node(kind=None, args=None):
    """Assemble a Celery Script node from its kind and args."""
    return {'kind': kind, 'args': args}
def create_pair(label=None, value=None):
    """Assemble a Celery Script label/value pair."""
    return {'label': label, 'value': value}
def _saved_location_node(pointer_type, pointer_id):
    """Build a saved-location Celery Script node.

    A pointer type containing 'tool' yields a 'tool' node keyed by tool_id;
    anything else yields a 'point' node carrying the pointer type and id.
    """
    if 'tool' in pointer_type.lower():
        return create_node(kind='tool', args={'tool_id': pointer_id})
    return create_node(kind='point',
                       args={'pointer_type': pointer_type,
                             'pointer_id': pointer_id})
def _coordinate_node(x_coord, y_coord, z_coord):
    """Wrap raw x/y/z values in a Celery Script 'coordinate' node."""
    return create_node(kind='coordinate',
                       args=_encode_coordinates(x_coord, y_coord, z_coord))
@_print_json
def add_point(point_x, point_y, point_z, point_r):
    """Build an 'add_point' node storing a point in the database.

    The point carries a coordinate location, a radius, and a body pair
    tagging it as created by 'plant-detection'.
    """
    node = create_node(
        kind='add_point',
        args={'location': _coordinate_node(point_x, point_y, point_z),
              'radius': point_r})
    origin = create_pair(label='created_by', value='plant-detection')
    node['body'] = [create_node(kind='pair', args=origin)]
    return node
@_print_json
def set_user_env(label, value):
    """Build a 'set_user_env' node that stores one environment variable
    as a label/value pair in the node body."""
    node = create_node(kind='set_user_env', args={})
    env_pair = create_pair(label=label, value=value)
    node['body'] = [create_node(kind='pair', args=env_pair)]
    return node
@_print_json
def move_absolute(location, offset, speed):
    """Build a 'move_absolute' node.

    Args:
        location: either a 2-item saved location [pointer_type, id]
            (e.g. ['tool', tool_id], ['Plant', pointer_id]) or a 3-item
            coordinate (x, y, z).
        offset: (x, y, z) distance added to the location.
        speed: movement speed (mm/s).
    """
    args = {}
    if len(location) == 2:
        args['location'] = _saved_location_node(location[0], location[1])
    elif len(location) == 3:
        args['location'] = _coordinate_node(*location)
    args['offset'] = _coordinate_node(*offset)
    args['speed'] = speed
    return create_node(kind='move_absolute', args=args)
@_print_json
def move_relative(distance=(0, 0, 0), speed=800):
    """Build a 'move_relative' node moving (x, y, z) mm from the current
    position at |speed| mm/s."""
    args = _encode_coordinates(*distance)
    args['speed'] = speed
    return create_node(kind='move_relative', args=args)
@_print_json
def data_update(endpoint, ids_=None):
    """Build a 'data_update' node signalling that a sync is required.

    The body holds one pair per id: a list of ids yields one pair each,
    a single id yields one pair, and None yields the wildcard '*'.
    """
    node = create_node(kind='data_update', args={'value': 'update'})
    if isinstance(ids_, list):
        labels = [str(id_) for id_ in ids_]
    elif ids_ is None:
        labels = ['*']
    else:
        labels = [str(ids_)]
    node['body'] = [
        create_node(kind='pair', args=create_pair(label=endpoint, value=item))
        for item in labels]
    return node
@_print_json
def send_message(message='Hello World!', message_type='success', channel=None):
    """Build a 'send_message' node.

    Args:
        message: text to deliver.
        message_type: success, busy, warn, error, info, fun.
        channel: optional channel name or list of names (toast, email);
            when given, one 'channel' node per name goes into the body.
    """
    node = create_node(kind='send_message',
                       args={'message': message,
                             'message_type': message_type})
    if channel is not None:
        names = channel if isinstance(channel, list) else [channel]
        node['body'] = [create_node(kind='channel',
                                    args={"channel_name": name})
                        for name in names]
    return node
@_print_json
def find_home(axis='all', speed=100):
    """Build a 'find_home' node homing one axis ('x', 'y', 'z') or 'all'."""
    return create_node(kind='find_home', args={'axis': axis, 'speed': speed})
@_print_json
def if_statement(lhs='x', op='is', rhs=0, _then=None, _else=None):
    """Build an '_if' Celery Script node.

    Args:
        lhs: left-hand side of the comparison.
        op: operator.
        rhs: right-hand side.
        _then: sequence id to execute on the then-branch, or None.
        _else: sequence id to execute on the else-branch, or None.
    """
    def _branch(sequence_id):
        # A missing branch becomes a 'nothing' node; otherwise 'execute'.
        if sequence_id is None:
            return create_node(kind='nothing', args={})
        return create_node(kind='execute', args={"sequence_id": sequence_id})
    args = {'lhs': lhs,
            'op': op,
            'rhs': rhs,
            '_then': _branch(_then),
            '_else': _branch(_else)}
    return create_node(kind='_if', args=args)
@_print_json
def write_pin(number=0, value=0, mode=0):
    """Build a 'write_pin' node setting pin |number| to |value| using
    pin mode 0 or 1."""
    return create_node(kind='write_pin',
                       args={'pin_number': number,
                             'pin_value': value,
                             'pin_mode': mode})
@_print_json
def read_pin(number=0, mode=0, label='---'):
    """Build a 'read_pin' node reading pin |number| (mode 0 or 1) under
    the given |label|."""
    return create_node(kind='read_pin',
                       args={'pin_number': number,
                             'pin_mode': mode,
                             'label': label})
@_print_json
def execute_sequence(sequence_id=0):
    """Build an 'execute' node running the sequence with |sequence_id|."""
    return create_node(kind='execute', args={'sequence_id': sequence_id})
@_print_json
def execute_script(label):
    """Build an 'execute_script' node launching the Farmware named |label|."""
    return create_node(kind='execute_script', args={'label': label})
@_print_json
def take_photo():
    """Build a 'take_photo' node (no arguments)."""
    return create_node(kind='take_photo', args={})
@_print_json
def wait(milliseconds=0):
    """Build a 'wait' node pausing execution for |milliseconds|."""
    return create_node(kind='wait', args={'milliseconds': milliseconds})
| FBTUG/DevZone | ai/demoCamera/plant_detection/CeleryPy.py | Python | mit | 10,249 |
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example updates user team associations.
It updates a user team association by setting the overridden access type to read
only for all teams that the user belongs to. To determine which users exists,
run get_all_users.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: UserTeamAssociationService.getUserTeamAssociationsByStatement
Tags: UserTeamAssociationService.updateUserTeamAssociations
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
USER_ID = 'INSERT_USER_ID_TO_UPDATE_HERE'
def main(client, user_id):
  """Set every team association of |user_id| to READ_ONLY overridden access.

  Args:
    client: an initialized dfp.DfpClient.
    user_id: id of the user whose team associations are updated.
  """
  # Initialize appropriate service.
  user_team_association_service = client.GetService(
      'UserTeamAssociationService', version='v201505')
  # Create filter text to select user team associations by the user ID.
  values = [{
      'key': 'userId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': user_id
      }
  }]
  query = 'WHERE userId = :userId'
  # Create a filter statement.
  # NOTE(review): FilterStatement pages results; only the first page is
  # processed here -- confirm that is acceptable for users on many teams.
  statement = dfp.FilterStatement(query, values)
  # Get user team associations by statement.
  response = user_team_association_service.getUserTeamAssociationsByStatement(
      statement.ToStatement())
  if 'results' in response:
    updated_user_team_associations = []
    # Update each local user team association to read only access.
    for user_team_association in response['results']:
      user_team_association['overriddenTeamAccessType'] = 'READ_ONLY'
      updated_user_team_associations.append(user_team_association)
    # Update user team associations on the server.
    user_team_associations = (
        user_team_association_service.updateUserTeamAssociations(
            updated_user_team_associations))
    # Display results.
    if user_team_associations:
      for user_team_association in user_team_associations:
        print ('User team association between user with ID \'%s\' and team with'
               ' ID \'%s\' was updated.' % (user_team_association['userId'],
                                            user_team_association['teamId']))
    else:
      print 'No user team associations were updated.'
  else:
    print 'No user team associations found to update.'
if __name__ == '__main__':
  # Initialize client object.
  # USER_ID above is a placeholder and must be replaced before running.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, USER_ID)
| coxmediagroup/googleads-python-lib | examples/dfp/v201505/user_team_association_service/update_user_team_associations.py | Python | apache-2.0 | 3,194 |
class EmptyAVL( Exception ):
    """Raised when a lookup is attempted on an AVL tree with no nodes."""
    pass
class AlreadyExists(Exception):
    """Raised when inserting a key that is already present in the tree."""

    def __init__(self, value):
        # Keep the duplicate key around for diagnostics.
        self.value = value

    def __str__(self):
        return repr(self.value)
class AVL:
    """AVL tree container; balancing is delegated to the AVLNode nodes."""

    def __init__(self):
        self.number = 0   # count of stored nodes
        self.root = None  # AVLNode, or None when the tree is empty

    def __setitem__(self, key, item):
        self.add(AVLNode(key, item))

    def hight(self):
        """Height of the tree; -1 when empty."""
        return self.root.hight if self.root is not None else -1

    def empty(self):
        return self.root is None

    def add(self, node):
        """Insert an AVLNode and bump the node count."""
        if self.root is None:
            self.root = node
        else:
            self.root.add(node)
        self.number += 1

    def __getitem__(self, key):
        return self.get(key)

    def get(self, key):
        """Return the value stored under |key|; raises EmptyAVL when empty."""
        if self.root is None:
            raise EmptyAVL
        return self.root.get(key)

    def __str__(self):
        if self.root is None:
            return "Empty AVL"
        return "Nodes : " + str(self.number) + "\n" + self.root.str(0)

    def update(self, avlB):
        """Adopt the root and node count of another AVL."""
        self.number = avlB.number
        self.root = avlB.root

    def suppr(self, key):
        """Delete |key| from the tree and decrement the node count."""
        if self.number == 1 and self.root.key == key:
            self.root = None
        else:
            self.root.suppr(key, self.root)
        self.number -= 1
class AVLNode:  # see Yves le maire exo6.3 AVL
    """A node of an AVL tree.

    Each node stores a (key, value) pair plus its subtree height (``hight``)
    and balance factor ``d`` (right height minus left height).  Mutating
    operations rebalance in place by relabeling nodes, keeping |d| <= 1.
    """

    def __init__(self, key="", value=None, left=None, right=None):
        self.key = key
        self.value = value
        self.left = left
        self.right = right
        self.update()

    def update(self):
        """Recompute the balance factor and height from the children."""
        left_hight = -1 if self.left is None else self.left.hight
        right_hight = -1 if self.right is None else self.right.hight
        self.d = right_hight - left_hight  # desequilibre (balance factor)
        self.hight = max(left_hight, right_hight) + 1

    def __gt__(self, node):
        return self.key > node.key

    def __lt__(self, node):
        return self.key < node.key

    def str(self, i):
        """Return an indented multi-line dump of this subtree (debug aid)."""
        space = " "*i
        buff = space+"Key : "+str(self.key)+"\n"
        buff += space+"Value : "+str(self.value)+"\n"
        buff += space+"Hight :"+str(self.hight)+"\n"
        buff += space+"D :"+str(self.d)+"\n"
        if self.left is not None:
            buff += space+"Left :\n"+self.left.str(i+1)+"\n"
        if self.right is not None:
            buff += space+"Right :\n"+self.right.str(i+1)+"\n"
        return buff

    def rotate_right(self):
        """Right rotation, performed in place by relabeling this node."""
        if self.left is None:
            return
        new_left = self.left.left
        new_right = AVLNode(self.key, self.value, self.left.right, self.right)
        self.key = self.left.key
        self.value = self.left.value
        self.left = new_left
        self.right = new_right
        self.update()

    def rotate_left(self):
        """Left rotation, performed in place by relabeling this node."""
        if self.right is None:
            return
        new_left = AVLNode(self.key, self.value, self.left, self.right.left)
        new_right = self.right.right
        self.key = self.right.key
        self.value = self.right.value
        self.left = new_left
        self.right = new_right
        self.update()

    def rotate_left_right(self):
        """Left-right double rotation (left child is right-heavy)."""
        if self.left is None or self.left.right is None:
            return
        new_left = AVLNode(self.left.key, self.left.value,
                           self.left.left, self.left.right.left)
        new_right = AVLNode(self.key, self.value,
                            self.left.right.right, self.right)
        self.key = self.left.right.key
        self.value = self.left.right.value
        self.left = new_left
        self.right = new_right
        self.update()

    def rotate_right_left(self):
        """Right-left double rotation (right child is left-heavy)."""
        if self.right is None or self.right.left is None:
            return
        new_left = AVLNode(self.key, self.value,
                           self.left, self.right.left.left)
        new_right = AVLNode(self.right.key, self.right.value,
                            self.right.left.right, self.right.right)
        self.key = self.right.left.key
        self.value = self.right.left.value
        self.left = new_left
        self.right = new_right
        self.update()

    def balance(self):
        """
        @brief balance the avl if right and left have already been balanced
        """
        if self.d == -2:
            if self.left.d == 1:
                self.rotate_left_right()
            else:
                self.rotate_right()
        elif self.d == 2:
            if self.right.d == -1:
                self.rotate_right_left()
            else:
                self.rotate_left()

    def add(self, new_node):
        """Insert |new_node| into this subtree and rebalance.

        Raises:
            AlreadyExists: if the key is already stored.
        """
        if new_node < self:
            if self.left is None:
                self.left = new_node
            else:
                self.left.add(new_node)
        elif new_node.key == self.key:
            # BUG FIX: the original raised AlreadyExists(key) where 'key'
            # is undefined (NameError); report the duplicate key instead.
            raise AlreadyExists(new_node.key)
        else:
            if self.right is None:
                self.right = new_node
            else:
                self.right.add(new_node)
        self.update()
        self.balance()

    def get(self, key):
        """Return the value stored under |key| (AttributeError if absent)."""
        if self.key == key:
            return self.value
        elif key < self.key:
            return self.left.get(key)
        else:
            return self.right.get(key)

    def suppr_min(self):
        """Detach the smallest node of this subtree.

        Returns:
            (min_node, remaining): the removed node and the subtree root
            that replaces this position after rebalancing.
        """
        if self.left is None:
            return (self, self.right)
        node, remaining = self.left.suppr_min()  # node is the smallest
        self.left = remaining
        self.update()
        self.balance()
        return node, self

    def erase(self, node):
        """Drop the child link pointing at leaf |node|."""
        if node.key < self.key:
            self.left = None
        else:
            self.right = None

    def suppr(self, key, parent):
        """Delete |key| from this subtree; |parent| links to this node."""
        if key < self.key:
            self.left.suppr(key, self)
        elif key > self.key:
            self.right.suppr(key, self)
        else:
            if self.right is None and self.left is None:
                parent.erase(self)
                return
            if self.right is None:
                # Only a left child: absorb its contents into this node.
                # BUG FIX: the original executed 'del self.left' after the
                # reassignment, deleting the attribute outright and making
                # the following update() crash with AttributeError.
                promoted = self.left
                self.key = promoted.key
                self.value = promoted.value
                self.right = promoted.right
                self.left = promoted.left
            else:
                # Replace with the in-order successor (min of right subtree).
                node, new_right = self.right.suppr_min()
                self.key = node.key
                self.value = node.value
                self.right = new_right
            self.update()
            self.balance()

    def search(self, key):
        """Return the value for |key|, or the value of the last node
        visited when the key is absent (approximate lookup)."""
        if self.key == key:
            return self.value
        elif key < self.key:
            if self.left is not None:
                return self.left.search(key)
            else:
                return self.value
        else:
            if self.right is not None:
                return self.right.search(key)
            else:
                return self.value

    def next(self, key, parent=None):
        """
        first >=key
        """
        if self.key == key:
            return self.value
        elif key < self.key:
            if self.left is not None:
                return self.left.next(key, self)
            elif self.right is not None:
                return self.value
        else:
            if self.right is not None:
                return self.right.next(key, self)
            else:
                # BUG FIX: the original returned self.parent, an attribute
                # that never exists (NameError).  Return the recorded parent
                # node (smallest ancestor to the right).
                # TODO(review): callers may expect parent.value instead.
                return parent  # min right
| athena-project/Artemis | src/AVL.py | Python | gpl-2.0 | 5,933 |
#coding:utf-8
__author__ = 'Administrator'
import threading
import socket,re
routers=[]
lock=threading.Lock()
def search_routers():
    """Scan every /24 derived from the local IPs, one thread per address."""
    local_ips=socket.gethostbyname_ex(socket.gethostname())[2]
    all_threads=[]
    for ip in local_ips:
        for i in xrange(1,255):
            array=ip.split(".")
            array[3]=str(i)
            # print array
            new_ip='.'.join(array)# join the octets back into an IP string
            t=threading.Thread(target=check_ip,args=(new_ip,))
            t.start()
            all_threads.append(t)
    # Wait for every probe thread to finish before returning.
    for t in all_threads:
        t.join()
def check_ip(new_ip):
    """Probe one host: report when its TCP port 135 accepts connections."""
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    s.settimeout(1)
    PORT=135
    result=s.connect_ex((new_ip,PORT))# try to connect (original comment said port 80, but PORT is 135)
    s.close()# close the connection
    if result==0:# connect_ex returns 0 on success
        lock.acquire()# acquire the lock so prints from threads don't interleave
        print new_ip.ljust(15),'port %s is open'%PORT
        lock.release()# release the lock
# NOTE(review): getTitle looks like an unfinished leftover pasted from some
# class -- it takes 'self' at module level, calls an undefined getPage(),
# and binds the bare 're' module to 'pattern'.  It is never called here;
# confirm and remove.
def getTitle(self):
    page=self.getPage(1)
    pattern=re
# Script entry point: announce and start the scan.
print 'searching for routers ,please waiting....'
search_routers()
| simplelist/python_test01 | ke_qq_com/scanPort.py | Python | lgpl-3.0 | 1,061 |
import sys,math,subprocess,numpy,pickle,os
import multiprocessing,multiprocessing.pool
import matplotlib,matplotlib.pyplot
def coverageComputer(tube):
    '''
    this function computes, in parallel, the per-gene median coverage for
    one tube by mapping singleGeneCoverageComputer over allGenesSorted;
    the returned list follows the order of allGenesSorted
    '''
    tasks=[[gene,tube] for gene in allGenesSorted]
    hydra=multiprocessing.pool.Pool(numberOfThreads)
    try:
        localCoverage=hydra.map(singleGeneCoverageComputer,tasks)
    finally:
        # BUG FIX: the pool was never closed, leaking worker processes on
        # every call; close and join it even if map() raises.
        hydra.close()
        hydra.join()
    return localCoverage
def geneDefiner():
    '''
    this function parses the GFF annotation file (module-level gffFile) and
    returns two dictionaries:
      geneLocations: gene name -> [chromosome, start, stop]
      geneNames:     chromosome -> list of gene names
    The first 17 lines are skipped as headers; chr17 features are excluded
    from geneLocations.
    '''
    geneLocations={}
    geneNames={}
    with open(gffFile,'r') as f:
        # Skip the fixed-size header block of this particular GFF file.
        for i in range(17):
            next(f)
        for line in f:
            v=line.split('\t')
            if len(v) > 2:
                if v[2] == 'gene':
                    # Gene id comes from the 'ID=...' attribute, truncated
                    # at the '_B' suffix used by this annotation.
                    geneName=v[8].split(';')[0].split('ID=')[1].split('_B')[0]
                    chromo=v[0]
                    start=int(v[3])
                    stop=int(v[4])
                    # NOTE(review): chr17 genes are excluded from
                    # geneLocations but still enter geneNames below; later
                    # code indexes geneLocations by geneNames entries --
                    # confirm chr17 never reaches this branch.
                    if chromo != 'chr17':
                        geneLocations[geneName]=[chromo,start,stop]
                    if chromo not in geneNames.keys():
                        geneNames[chromo]=[geneName]
                    else:
                        geneNames[chromo].append(geneName)
    return geneLocations,geneNames
def singleGeneCoverageComputer(task):
    '''
    this function computes the coverage for a single gene at a single tube;
    task is [gene, tube]; it shells out to samtools mpileup over the gene
    region and returns the median per-position read depth
    '''
    gene=task[0]
    tube=task[1]
    bamfile=path2datafiles+tube+'/data/reference.bam'
    chromosome=geneLocations[gene][0]
    loca=geneLocations[gene][1]
    locb=geneLocations[gene][2]
    # samtools region string, e.g. 'chr1:100-200'.
    pos=chromosome+':'+str(loca)+'-'+str(locb)
    command=['samtools','mpileup','-r',pos,bamfile]
    outFile=scratchDir+'samtoolsOutput/'+tube+'.'+gene+'.out.txt'
    errFile=scratchDir+'samtoolsError/'+tube+'.'+gene+'.err.txt'
    stdout = open(outFile,"w")
    stderr = open(errFile,"w")
    subprocess.call(command,stdout=stdout,stderr=stderr)
    stdout.close()
    stderr.close()
    # taking the median coverage for that gene
    average=[]
    with open(outFile,'r') as f:
        for line in f:
            vector=line.split('\t')
            # Column 4 is taken as the read depth -- confirm against the
            # samtools mpileup output format.
            reads=int(vector[3])
            average.append(reads)
    # NOTE(review): numpy.median([]) yields nan (with a warning) when
    # samtools produced no lines for the region -- confirm acceptable.
    coverage=numpy.median(average)
    # removing files
    os.remove(outFile)
    os.remove(errFile)
    # final message
    #print('computed coverage {} for tube {} gene {}'.format(coverage,tube,gene))
    return coverage
def trajectoriesPlotter3(localTubes):
    '''
    this function builds rich figures about coverage: three stacked scatter
    panels (one per tube in localTubes) of log2 corrected coverage per gene
    '''
    # defining pseudo legend
    # NOTE(review): the legend text is fixed to 'E2 ...' regardless of the
    # tubes passed in -- confirm intended.
    localText=['E2 (n=0)','E2 (AP+)','E2 (AP-)']
    # defining y
    y1=numpy.log2(numpy.array(correctedCoverage[localTubes[0]]))
    y2=numpy.log2(numpy.array(correctedCoverage[localTubes[1]]))
    y3=numpy.log2(numpy.array(correctedCoverage[localTubes[2]]))
    # defining x and colors
    # NOTE(review): leftover debugging prints.
    print(len(y1))
    print(len(theColors))
    x=[i for i in range(len(theColors))]
    # plotting subfigures
    matplotlib.pyplot.subplot(311)
    matplotlib.pyplot.scatter(x,y1,marker='.',color=theColors)
    matplotlib.pyplot.xlim([-0.025*len(x),len(x)+0.025*len(x)])
    matplotlib.pyplot.ylim([-3,3])
    matplotlib.pyplot.plot([0,max(x)],[1,1],'--r',lw=2)
    matplotlib.pyplot.plot([0,max(x)],[-1,-1],'--b',lw=2)
    matplotlib.pyplot.xticks([])
    matplotlib.pyplot.yticks([-2,-1,0,1,2])
    matplotlib.pyplot.subplot(312)
    matplotlib.pyplot.scatter(x,y2,marker='.',color=theColors)
    matplotlib.pyplot.xlim([-0.025*len(x),len(x)+0.025*len(x)])
    matplotlib.pyplot.ylim([-3,3])
    matplotlib.pyplot.plot([0,max(x)],[1,1],'--r',lw=2)
    matplotlib.pyplot.plot([0,max(x)],[-1,-1],'--b',lw=2)
    matplotlib.pyplot.xticks([])
    matplotlib.pyplot.yticks([-2,-1,0,1,2])
    matplotlib.pyplot.ylabel('Coverage')
    matplotlib.pyplot.subplot(313)
    matplotlib.pyplot.scatter(x,y3,marker='.',color=theColors)
    matplotlib.pyplot.xlim([-0.025*len(x),len(x)+0.025*len(x)])
    matplotlib.pyplot.ylim([-3,3])
    matplotlib.pyplot.plot([0,max(x)],[1,1],'--r',lw=2)
    matplotlib.pyplot.plot([0,max(x)],[-1,-1],'--b',lw=2)
    matplotlib.pyplot.yticks([-2,-1,0,1,2])
    # final arrangements
    matplotlib.pyplot.figtext(0.85,0.325,localText[2])
    matplotlib.pyplot.figtext(0.85,0.625,localText[1])
    matplotlib.pyplot.figtext(0.85,0.925,localText[0])
    matplotlib.pyplot.xlabel('Chromosome')
    #pos=0
    #xtickPositions=[]
    #for chromosome in chromosomeNames:
    #    bins=numpy.arange(size,chromosomes[chromosome],size)
    #    for i in range(len(bins)-1):
    #        pos=pos+1
    #    xtickPositions.append(pos-len(bins)/2)
    #minimalLabels=[str(i+1) for i in range(len(chromosomeNames))]
    #matplotlib.pyplot.xticks(xtickPositions,minimalLabels)
    #$for i in range(len(matplotlib.pyplot.gca().get_xticklabels())):
    #if i%2 == 0:
    #        matplotlib.pyplot.gca().get_xticklabels()[i].set_color('orange')
    #    else:
    #        matplotlib.pyplot.gca().get_xticklabels()[i].set_color('green')
    matplotlib.pyplot.tight_layout()
    figureFile=figuresDir+'trajectories.perGene.{}.pdf'.format('.'.join(localTubes))
    matplotlib.pyplot.savefig(figureFile)
    matplotlib.pyplot.clf()
    # NOTE(review): sys.exit() terminates the whole script after the first
    # figure is saved; the 'return None' below is unreachable -- likely a
    # debugging leftover, confirm.
    sys.exit()
    return None
###
### MAIN
###
# 0. user defined variables
gffFile='/proj/omics4tb/alomana/projects/ap/seqsFromGates/src/genome/BY4741_Toronto_2012/BY4741_Toronto_2012.gff'
path2datafiles='/proj/omics4tb/alomana/projects/ap/seqsFromGates/results/toronto/'
scratchDir='/proj/omics4tb/alomana/scratch/'
figuresDir='figures/'
numberOfThreads=64
# Map each experiment to the tubes (samples) it produced.
tubeCorrespondance={}
experiments=['exp1','exp2','exp3','exp4','exp5']
tubeCorrespondance['exp1']=['A2','B','D']
tubeCorrespondance['exp2']=['E2','F','G']
tubeCorrespondance['exp3']=['H2','I']
tubeCorrespondance['exp4']=['J2','K']
tubeCorrespondance['exp5']=['L','M']
# Flatten and sort the full tube list.
tubes=[]
for experiment in experiments:
    for tube in tubeCorrespondance[experiment]:
        tubes.append(tube)
tubes.sort()
# 1. define the set of genes for which average coverage will be computed
# 1.1. define gene positions
geneLocations,geneNames=geneDefiner()
# 1.2. define groups of genes
# Order genes per chromosome by their start coordinate.
allGenesSorted=[]
chromosomeNames=list(geneNames.keys())
chromosomeNames.sort()
for chromo in chromosomeNames:
    geneStarts={}
    for gene in geneNames[chromo]:
        geneStarts[gene]=geneLocations[gene][1]
    orderedGenes=sorted(geneStarts,key=geneStarts.__getitem__)
    for element in orderedGenes:
        allGenesSorted.append(element)
# 2. compute coverage
coverage={}
for tube in tubes:
    localCoverage=coverageComputer(tube)
    coverage[tube]=localCoverage
# 3. writing or reading coverage
print('pickling or unpickling data...')
jar='coverage.perSingleGene.pckl'
f=open(jar,'wb')
pickle.dump(coverage,f)
f.close()
# NOTE(review): this sys.exit() stops the run after pickling; everything
# below (unpickling, correction, plotting) is unreachable and presumably
# toggled by hand between runs -- confirm.
sys.exit()
f=open(jar,'rb')
coverage=pickle.load(f)
f.close()
# 4. correcting coverage
# 4.1. computing fold-change relative to median coverage
relativeCoverage={}
for tube in tubes:
    relative=coverage[tube]/numpy.median(coverage[tube])
    relativeCoverage[tube]=relative
# 4.2. computing fold-change relative coverage to the empirical expectation
correctedCoverage={}
values=[]
for tube in tubes:
    values.append(relativeCoverage[tube])
v=numpy.array(values)
expectedCoverage=numpy.mean(v,axis=0)
for tube in tubes:
    corrected=relativeCoverage[tube]/expectedCoverage
    correctedCoverage[tube]=corrected
# 4. plot figure
print('plotting figures...')
trajectoriesPlotter3(['A2','B','D'])
| adelomana/cassandra | sequenceAnalysis/coverage/geneCoverageCalculator.py | Python | gpl-3.0 | 7,762 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Installs and configures Puppet
"""
import sys
import logging
import os
import time
from packstack.installer import utils
from packstack.installer import basedefs
from packstack.installer.exceptions import PuppetError
from packstack.installer.exceptions import ScriptRuntimeError
from packstack.installer.utils import split_hosts
from packstack.modules.common import filtered_hosts
from packstack.modules.ospluginutils import appendManifestFile
from packstack.modules.ospluginutils import generateHieraDataFile
from packstack.modules.ospluginutils import getManifestTemplate
from packstack.modules.ospluginutils import manifestfiles
from packstack.modules.puppet import validate_logfile
from packstack.modules.puppet import scan_logfile
# ------------- Puppet Packstack Plugin Initialization --------------
PLUGIN_NAME = "Puppet"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
PUPPET_DIR = os.environ.get('PACKSTACK_PUPPETDIR',
'/usr/share/openstack-puppet/')
MODULE_DIR = os.path.join(PUPPET_DIR, 'modules')
def initConfig(controller):
    """Register the (empty) PUPPET option group with the installer.

    The group carries no options of its own; it exists so the plugin is
    part of the configuration flow with always-satisfied pre/post
    conditions.
    """
    puppet_group = {"GROUP_NAME": "PUPPET",
                    "DESCRIPTION": "Puppet Config parameters",
                    "PRE_CONDITION": lambda x: 'yes',
                    "PRE_CONDITION_MATCH": "yes",
                    "POST_CONDITION": False,
                    "POST_CONDITION_MATCH": True}
    controller.addGroup(puppet_group, [])
def initSequences(controller):
    """Register the clean-up pre-step and the main Puppet step sequence."""
    cleanup_steps = [
        {'title': 'Clean Up', 'functions': [run_cleanup]},
    ]
    # The clean-up sequence must run before everything else.
    controller.insertSequence("Clean Up", [], [], cleanup_steps, index=0)
    puppet_flow = [
        {'title': 'Preparing Puppet manifests',
         'functions': [prepare_puppet_modules]},
        {'title': 'Copying Puppet modules and manifests',
         'functions': [copy_puppet_modules]},
        {'title': 'Applying Puppet manifests',
         'functions': [apply_puppet_manifest]},
        {'title': 'Finalizing',
         'functions': [finalize]}
    ]
    controller.addSequence("Puppet", [], [], puppet_flow)
# ------------------------- helper functions -------------------------
def wait_for_puppet(currently_running, messages):
    """Poll remote hosts until every outstanding puppet run has finished.

    currently_running -- list of (hostname, finished_logfile) tuples; entries
                         are removed in place as their runs complete.
    messages          -- list extended in place with notices scanned from the
                         retrieved puppet logs.

    Raises PuppetError (via validate_logfile) when a retrieved log contains
    errors.
    """
    log_len = 0
    twirl = ["-", "\\", "|", "/"]
    while currently_running:
        for hostname, finished_logfile in currently_running:
            log_file = os.path.splitext(os.path.basename(finished_logfile))[0]
            if len(log_file) > log_len:
                log_len = len(log_file)
            # Spin the progress twirl only on interactive terminals.
            if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
                twirl = twirl[-1:] + twirl[:-1]
                sys.stdout.write(("\rTesting if puppet apply is finished: %s"
                                  % log_file).ljust(40 + log_len))
                sys.stdout.write("[ %s ]" % twirl[0])
                sys.stdout.flush()
            try:
                # Once a remote puppet run has finished, we retrieve the log
                # file and check it for errors
                local_server = utils.ScriptRunner()
                log = os.path.join(basedefs.PUPPET_MANIFEST_DIR,
                                   os.path.basename(finished_logfile))
                log = log.replace(".finished", ".log")
                local_server.append('scp -o StrictHostKeyChecking=no '
                                    '-o UserKnownHostsFile=/dev/null '
                                    'root@[%s]:%s %s'
                                    % (hostname, finished_logfile, log))
                # To not pollute logs we turn of logging of command execution
                local_server.execute(log=False)
                # If we got to this point the puppet apply has finished.
                # NOTE(review): the list is mutated while the for loop is
                # iterating it, which may skip the next entry on this pass;
                # the enclosing while loop re-scans, so no entry is lost.
                currently_running.remove((hostname, finished_logfile))
                # clean off the last "testing apply" msg
                if hasattr(sys.stdout, "isatty") and sys.stdout.isatty():
                    sys.stdout.write(("\r").ljust(45 + log_len))
            except ScriptRuntimeError:
                # the test raises an exception if the file doesn't exist yet
                # TO-DO: We need to start testing 'e' for unexpected exceptions
                time.sleep(3)
                continue
            # check log file for relevant notices
            messages.extend(scan_logfile(log))
            # check the log file for errors
            sys.stdout.write('\r')
            try:
                validate_logfile(log)
                state = utils.state_message('%s:' % log_file, 'DONE', 'green')
                sys.stdout.write('%s\n' % state)
                sys.stdout.flush()
            except PuppetError:
                state = utils.state_message('%s:' % log_file, 'ERROR', 'red')
                sys.stdout.write('%s\n' % state)
                sys.stdout.flush()
                raise
# -------------------------- step functions --------------------------
def run_cleanup(config, messages):
    """Remove stale Puppet manifests left over from previous runs."""
    cleaner = utils.ScriptRunner()
    cleaner.append("rm -rf %s/*pp" % basedefs.PUPPET_MANIFEST_DIR)
    cleaner.execute()
def copy_puppet_modules(config, messages):
    """Ship hieradata, manifests, resources and Puppet modules to each host.

    Builds one long shell script (tar-over-ssh / scp into the per-host temp
    directory recorded in config['HOST_DETAILS'][host]['tmpdir']) and runs it
    once at the end.
    """
    # The fixed set of Puppet modules Packstack needs on every host.
    os_modules = ' '.join(('aodh', 'apache', 'ceilometer', 'certmonger',
                           'cinder', 'concat', 'firewall', 'glance',
                           'gnocchi', 'heat', 'horizon', 'inifile', 'ironic',
                           'keystone', 'magnum', 'manila', 'memcached',
                           'mysql', 'neutron', 'nova', 'nssdb', 'openstack',
                           'openstacklib', 'oslo', 'ovn', 'packstack',
                           'placement', 'rabbitmq', 'redis', 'remote', 'rsync',
                           'sahara', 'ssh', 'stdlib', 'swift', 'sysctl',
                           'systemd', 'tempest', 'trove', 'vcsrepo',
                           'vswitch', 'xinetd'))
    # write puppet manifest to disk
    manifestfiles.writeManifests()
    # write hieradata file to disk
    generateHieraDataFile()
    server = utils.ScriptRunner()
    for hostname in filtered_hosts(config):
        host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
        # copy hiera defaults.yaml file
        server.append("cd %s" % basedefs.HIERADATA_DIR)
        server.append("tar --dereference -cpzf - ../hieradata | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" % (hostname, host_dir))
        # copy Packstack manifests
        server.append("cd %s/puppet" % basedefs.DIR_PROJECT_DIR)
        server.append("cd %s" % basedefs.PUPPET_MANIFEST_DIR)
        server.append("tar --dereference -cpzf - ../manifests | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" % (hostname, host_dir))
        # copy resources
        resources = config.get('RESOURCES', {})
        for path, localname in resources.get(hostname, []):
            server.append("scp -o StrictHostKeyChecking=no "
                          "-o UserKnownHostsFile=/dev/null "
                          "%s root@[%s]:%s/resources/%s" %
                          (path, hostname, host_dir, localname))
        # copy Puppet modules required by Packstack
        server.append("cd %s" % MODULE_DIR)
        server.append("tar --dereference -cpzf - %s | "
                      "ssh -o StrictHostKeyChecking=no "
                      "-o UserKnownHostsFile=/dev/null "
                      "root@%s tar -C %s -xpzf -" %
                      (os_modules, hostname,
                       os.path.join(host_dir, 'modules')))
    # All per-host commands are executed in one batch.
    server.execute()
def apply_puppet_manifest(config, messages):
    """Run every generated manifest on its host, grouped by marker.

    Manifests sharing a marker are launched in parallel (backgrounded on the
    remote host, guarded by a flock); when the marker changes we block in
    wait_for_puppet until the previous group has finished.
    """
    if config.get("DRY_RUN"):
        return
    currently_running = []
    lastmarker = None
    loglevel = ''
    logcmd = False
    # Propagate our own debug level to the remote puppet invocation.
    if logging.root.level <= logging.DEBUG:
        loglevel = '--debug'
        logcmd = True
    for manifest, marker in manifestfiles.getFiles():
        # if the marker has changed then we don't want to proceed until
        # all of the previous puppet runs have finished
        if lastmarker is not None and lastmarker != marker:
            wait_for_puppet(currently_running, messages)
        lastmarker = marker
        for hostname in filtered_hosts(config):
            # Manifests are named "<host>_<role>.pp"; skip other hosts'.
            if "%s_" % hostname not in manifest:
                continue
            host_dir = config['HOST_DETAILS'][hostname]['tmpdir']
            print("Applying %s" % manifest)
            server = utils.ScriptRunner(hostname)
            man_path = os.path.join(config['HOST_DETAILS'][hostname]['tmpdir'],
                                    basedefs.PUPPET_MANIFEST_RELATIVE,
                                    manifest)
            running_logfile = "%s.running" % man_path
            finished_logfile = "%s.finished" % man_path
            currently_running.append((hostname, finished_logfile))
            server.append("touch %s" % running_logfile)
            server.append("chmod 600 %s" % running_logfile)
            server.append("export PACKSTACK_VAR_DIR=%s" % host_dir)
            server.append("export LANG=C.UTF-8")
            # Renaming .running -> .finished signals completion to
            # wait_for_puppet; the whole pipeline is detached (&).
            cmd = ("( flock %s/ps.lock "
                   "puppet apply %s --modulepath %s/modules %s > %s "
                   "2>&1 < /dev/null ; "
                   "mv %s %s ) > /dev/null 2>&1 < /dev/null &"
                   % (host_dir, loglevel, host_dir, man_path, running_logfile,
                      running_logfile, finished_logfile))
            server.append(cmd)
            server.execute(log=logcmd)
    # wait for outstanding puppet runs before exiting
    wait_for_puppet(currently_running, messages)
def prepare_puppet_modules(config, messages):
    """Generate the per-host Packstack manifests from bundled templates.

    Writes the controller manifest first, then one manifest per network and
    compute host, and finally the controller post-install manifest.  The
    marker groups manifests that may be applied concurrently.
    """
    network_hosts = split_hosts(config['CONFIG_NETWORK_HOSTS'])
    compute_hosts = split_hosts(config['CONFIG_COMPUTE_HOSTS'])
    controller_host = config['CONFIG_CONTROLLER_HOST']
    appendManifestFile("%s_controller.pp" % controller_host,
                       getManifestTemplate("controller"),
                       marker='controller')
    for role, hosts in (('network', network_hosts),
                        ('compute', compute_hosts)):
        for host in hosts:
            appendManifestFile("%s_%s.pp" % (host, role),
                               getManifestTemplate(role),
                               marker=role)
    appendManifestFile("%s_controller_post.pp" % controller_host,
                       getManifestTemplate("controller_post"),
                       marker='controller')
def finalize(config, messages):
    """Warn about hosts that must reboot to run their newest kernel.

    On every host, compares the version of the most recently installed
    kernel package with the running kernel; when they differ the remote
    test command exits non-zero, which surfaces here as ScriptRuntimeError
    and is converted into a user-facing message instead of an error.
    """
    for hostname in filtered_hosts(config):
        server = utils.ScriptRunner(hostname)
        # Raw strings keep the sed escapes byte-identical without relying on
        # Python's deprecated pass-through of unknown backslash escapes
        # (the old code needed a "noqa: W605" to silence that warning).
        server.append(r"installed=$(rpm -q kernel --last | head -n1 | "
                      r"sed 's/kernel-\([a-z0-9\.\_\-]*\).*/\1/g')")
        server.append("loaded=$(uname -r | head -n1)")
        server.append('[ "$loaded" == "$installed" ]')
        try:
            rc, out = server.execute()
        except ScriptRuntimeError:
            messages.append('Because of the kernel update the host %s '
                            'requires reboot.' % hostname)
| mahak/packstack | packstack/plugins/puppet_950.py | Python | apache-2.0 | 11,863 |
#!/usr/bin/env python3
import sys
try:
import __builtin__
except ImportError:
import builtins as __builtin__
import os
# python puts the program's directory path in sys.path[0]. In other words, the user ordinarily has no way
# to override python's choice of a module from its own dir. We want to have that ability in our environment.
# However, we don't want to break any established python modules that depend on this behavior. So, we'll
# save the value from sys.path[0], delete it, import our modules and then restore sys.path to its original
# value.
save_path_0 = sys.path[0]
del sys.path[0]
from gen_print import *
from gen_arg import *
from gen_plug_in import *
# Restore sys.path[0].
sys.path.insert(0, save_path_0)
# Create parser object to process command line parameters and args.
# NOTE(review): argparse and the *_help_text / default_string names come from
# the wildcard imports above (gen_arg / gen_plug_in).
parser = argparse.ArgumentParser(
    usage='%(prog)s [OPTIONS] [PLUG_IN_DIR_PATHS]',
    description="%(prog)s will validate the plug-in packages passed to it."
    + " It will also print a list of the absolute plug-in"
    + " directory paths for use by the calling program.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    prefix_chars='-+')
# Create arguments.
parser.add_argument(
    'plug_in_dir_paths',
    nargs='?',
    default="",
    help=plug_in_dir_paths_help_text + default_string)
parser.add_argument(
    '--mch_class',
    default="obmc",
    help=mch_class_help_text + default_string)
# The stock_list will be passed to gen_get_options. We populate it with the names of stock parm options we
# want. These stock parms are pre-defined by gen_get_options.
stock_list = [("test_mode", 0), ("quiet", 1), ("debug", 0)]
def exit_function(signal_number=0,
                  frame=None):
    r"""
    Execute whenever the program ends normally or with the signals that we catch (i.e. TERM, INT).

    Description of argument(s):
    signal_number  The signal that triggered the exit (0 for a normal,
                   non-signal exit).
    frame          The stack frame active when the signal was received.
    """
    dprint_executing()
    dprint_var(signal_number)
    # Print the standard program footer (timing, exit info) unless quiet.
    qprint_pgm_footer()
def signal_handler(signal_number, frame):
    r"""
    Handle signals.  Without a function to catch a SIGTERM or SIGINT, our program would terminate immediately
    with return code 143 and without calling our exit_function.

    Description of argument(s):
    signal_number  The number of the received signal (e.g. SIGTERM, SIGINT).
    frame          The stack frame active when the signal arrived.
    """
    # Our convention is to set up exit_function with atexit.register() so there is no need to explicitly call
    # exit_function from here.
    dprint_executing()
    # Calling exit prevents us from returning to the code that was running when we received the signal.
    exit(0)
def validate_parms():
    r"""
    Validate program parameters, etc.  Return True or False accordingly.
    """
    # Hook exit_function/signal_handler into atexit and TERM/INT handling
    # via the shared post-validation helper from gen_arg.
    gen_post_validation(exit_function, signal_handler)
    return True
def main():
    r"""
    This is the "main" function.  The advantage of having this function vs just doing this in the true
    mainline is that you can:
    - Declare local variables
    - Use "return" instead of "exit".
    - Indent 4 chars like you would in any function.
    This makes coding more consistent, i.e. it's easy to move code from here into a function and vice versa.

    Returns True on success, False on a parameter/validation failure.
    """
    if not gen_get_options(parser, stock_list):
        return False
    if not validate_parms():
        return False
    qprint_pgm_header()
    # Access program parameter globals.
    global plug_in_dir_paths
    global mch_class
    # return_plug_in_packages_list (from gen_plug_in) validates each package
    # and resolves it to an absolute directory path.
    plug_in_packages_list = return_plug_in_packages_list(plug_in_dir_paths,
                                                         mch_class)
    qprint_var(plug_in_packages_list)
    # As stated in the help text, this program must print the full paths of each selected plug in.
    for plug_in_dir_path in plug_in_packages_list:
        print(plug_in_dir_path)
    return True
# Main
# Convert a False return (parameter/validation failure) into a non-zero
# exit status for the calling automation.
if not main():
    exit(1)
| openbmc/openbmc-test-automation | bin/validate_plug_ins.py | Python | apache-2.0 | 3,734 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request
import chardet
import sys
import codecs
from html.parser import HTMLParser
lentaUrl = 'http://lenta.ru/'
starTag = 'b-yellow-box__header bordered-title'
endTag = 'b-sidebar-sticky'
newsTag = 'a'
classTag = 'class'
separator = '---------------------------------------------------------------------------------------'
news = []
class MyHTMLParser(HTMLParser):
    """Extracts headline fragments from the Lenta front page.

    Collection is stateful: the region of interest starts at an element
    whose class is ``starTag`` and ends at one whose class is ``endTag``.
    Inside that region the text of each <a> tag is appended to the
    module-level ``news`` list, preceded by a ``separator`` entry.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        # readData: next handle_data() call belongs to a headline link
        self.readData = False
        # weAreIn: currently between the start and end marker elements
        self.weAreIn = False
    def handle_starttag(self, tag, attrs):
        # Update region state from the element's class attribute.
        for attr in attrs:
            if attr[0] == classTag:
                if attr[1] == starTag:
                    self.weAreIn = True
                if attr[1] == endTag:
                    self.weAreIn = False
        if tag == newsTag and self.weAreIn == True:
            news.append(separator)
            self.readData = True
        else:
            # Any other tag disarms data collection until the next <a>.
            self.readData = False
    def handle_data(self, data):
        if self.readData:
            news.append(data)
def proceed():
    """Download the Lenta front page and feed it to the news parser."""
    request = urllib.request.urlopen(lentaUrl)
    content = request.read()
    encoding = chardet.detect(content)['encoding']
    print('Encoding of the website: ' + str(encoding))
    print('Encoding of the console: ' + str(sys.stdout.encoding))
    # Bug fix: the detected encoding used to be ignored and the page was
    # always decoded as UTF-8.  Use the detection result, falling back to
    # UTF-8 when chardet cannot determine an encoding (returns None).
    html = content.decode(encoding or 'utf-8', errors='replace')
    parser = MyHTMLParser()
    parser.feed(html)
def show():
    """Assemble the parsed fragments into headlines and print them."""
    finalOutput = []
    tempword = ' '
    # Each separator entry in `news` terminates the words of one headline.
    for word in news:
        if word == separator:
            finalOutput.append(tempword)
            finalOutput.append(separator)
            tempword = ' '
        else:
            tempword = tempword + ' ' + word
    for headline in finalOutput:
        # Round-trip through UTF-8 so undecodable characters are replaced
        # instead of crashing the console printer.  The old code bound the
        # intermediate value to the name ``bytes``, shadowing the builtin.
        encoded = headline.encode('utf-8')
        print(encoded.decode('utf-8', errors='replace'))
print("LENTA NEWS:")
try:
    proceed()
    show()
except OSError as e:
    # Only OS-level errors (network/I-O) carry errno/strerror.
    print("Error " + str(e.errno) + ' ' + str(e.strerror))
except Exception as e:
    # Bug fix: the old handler caught Exception and then crashed with
    # AttributeError on e.errno for any non-OS error.
    print("Error " + str(e))
print("")
input("Press Enter To Exit")
#!/usr/bin/env python
# _*_ coding:utf-8 _*-_
############################
# File Name: demo.py
# Author: lza
# Created Time: 2016-08-31 11:36:08
############################
# NOTE(review): this script is Python 2 (print statements below); keep it so.
import difflib
import sys
# Take the two file names to compare from the command line; exit with a
# usage message when either is missing.
try:
    file1 = sys.argv[1]
    file2 = sys.argv[2]
except Exception as e:
    print "Error:"+ str(e)
    print "Usage:"+ sys.argv[0] +" filename1 filename2"
    sys.exit(1)
def readfile(filename):
    # Return the file's contents as a list of lines (newline-stripped).
    # Exits the whole program on I/O failure rather than raising.
    try:
        with open(filename, "rb") as f:
            filetext = f.read().splitlines()
            # print filetext
            return filetext
    except IOError as e:
        print "read file %s Error: %s" %(filename, str(e))
        sys.exit(1)
# NOTE(review): this guard is mostly dead -- a missing argument already
# exited above; it only fires when an empty string is passed explicitly.
if file1 == "" or file2 == "":
    print "Usage:"+ sys.argv[0] +" filename1 filename2"
    sys.exit(1)
file1_lines = readfile(file1)
file2_lines = readfile(file2)
# Emit a side-by-side HTML diff of the two files to stdout.
d = difflib.HtmlDiff()
print d.make_file(file1_lines, file2_lines)
# NOTE(review): the script runs entirely at import time; this guard is a
# no-op kept for convention.
if __name__ == "__main__":
    pass
| zhengjue/mytornado | study/2/difflib/demo2.py | Python | gpl-3.0 | 925 |
from dateutil import parser
from pynwm.hydroshare import hs_list
def test_no_date():
    '''Empty-ish dates should map to an empty URL argument string.'''
    for empty_value in (None, ''):
        assert hs_list._date_to_start_date_arg(empty_value) == ''
def test_date_obj():
    '''A datetime object should render as a startDate URL argument.'''
    date_obj = parser.parse('2017-06-01')
    result = hs_list._date_to_start_date_arg(date_obj)
    assert result == '&startDate=2017-06-01'
def test_date_str():
    '''Common string date formats should normalize to the same argument.'''
    expected = '&startDate=2017-06-01'
    for text in ('2017-06-01', '6/1/2017', '6-1-2017', '2017-6-1'):
        assert hs_list._date_to_start_date_arg(text) == expected
| twhiteaker/pynwm | src/pynwm/test/test_pynwm/test_hydroshare/test_hs_list_date_to_start_date_arg.py | Python | mit | 829 |
import sys
import os
import re
import string
import imp
from tkinter import *
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
import traceback
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
if micro:
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
def _find_module(fullname, path=None):
    """Version of imp.find_module() that handles hierarchical module names.

    Walks each dotted component, loading intermediate packages so their
    __path__ can seed the next lookup, and returns (file, filename, descr)
    for the final component without loading it.
    """
    # NOTE(review): the imp module is deprecated (removed in Python 3.12);
    # this predates importlib and would migrate to importlib.util.find_spec.
    file = None
    for tgt in fullname.split('.'):
        if file is not None:
            file.close()  # close intermediate files
        (file, filename, descr) = imp.find_module(tgt, path)
        if descr[2] == imp.PY_SOURCE:
            break  # find but not load the source file
        module = imp.load_module(tgt, file, filename, descr)
        try:
            path = module.__path__
        except AttributeError:
            raise ImportError('No source for module ' + module.__name__)
    if descr[2] != imp.PY_SOURCE:
        # If all of the above fails and didn't raise an exception,fallback
        # to a straight import which can find __init__.py in a package.
        m = __import__(fullname)
        try:
            filename = m.__file__
        except AttributeError:
            pass
        else:
            file = None
            descr = os.path.splitext(filename)[1], None, imp.PY_SOURCE
    return file, filename, descr
class HelpDialog(object):
    """Singleton wrapper around the modeless help-text window.

    At most one help window exists at a time; display() reuses the live
    window or creates a new one positioned near the requesting Toplevel.
    """
    def __init__(self):
        self.parent = None # parent of help window
        self.dlg = None # the help window itself
    def display(self, parent, near=None):
        """ Display the help dialog.

        parent - parent widget for the help window
        near - a Toplevel widget (e.g. EditorWindow or PyShell)
               to use as a reference for placing the help window
        """
        if self.dlg is None:
            self.show_dialog(parent)
        if near:
            self.nearwindow(near)
    def show_dialog(self, parent):
        # Create the (modeless) text viewer on idlelib's help.txt and track
        # its destruction so the singleton can be recreated later.
        self.parent = parent
        fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
        self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
        dlg.bind('<Destroy>', self.destroy, '+')
    def nearwindow(self, near):
        # Place the help dialog near the window specified by parent.
        # Note - this may not reposition the window in Metacity
        # if "/apps/metacity/general/disable_workarounds" is enabled
        dlg = self.dlg
        geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
        dlg.withdraw()
        dlg.geometry("=+%d+%d" % geom)
        dlg.deiconify()
        dlg.lift()
    def destroy(self, ev=None):
        # Drop references so the next display() builds a fresh window.
        self.dlg = None
        self.parent = None
helpDialog = HelpDialog()  # singleton instance
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
    def __init__(self, flist=None, filename=None, key=None, root=None):
        """Create one editor window.

        flist    -- optional FileList managing all open windows.
        filename -- optional file to load into the new window.
        key      -- flist dictionary key for this window.
        root     -- Tk root; defaults to flist.root.
        """
        # Resolve the class-wide help URL once, preferring local docs.
        if EditorWindow.help_url is None:
            dochome = os.path.join(sys.prefix, 'Doc', 'index.html')
            if sys.platform.count('linux'):
                # look for html docs in a couple of standard places
                pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
                if os.path.isdir('/var/www/html/python/'): # "python2" rpm
                    dochome = '/var/www/html/python/index.html'
                else:
                    basepath = '/usr/share/doc/' # standard location
                    dochome = os.path.join(basepath, pyver,
                                           'Doc', 'index.html')
            elif sys.platform[:3] == 'win':
                chmfile = os.path.join(sys.prefix, 'Doc',
                                       'Python%s.chm' % _sphinx_version())
                if os.path.isfile(chmfile):
                    dochome = chmfile
            elif macosxSupport.runningAsOSXApp():
                # documentation is stored inside the python framework
                dochome = os.path.join(sys.prefix,
                        'Resources/English.lproj/Documentation/index.html')
            dochome = os.path.normpath(dochome)
            if os.path.isfile(dochome):
                EditorWindow.help_url = dochome
                if sys.platform == 'darwin':
                    # Safari requires real file:-URLs
                    EditorWindow.help_url = 'file://' + EditorWindow.help_url
            else:
                EditorWindow.help_url = "http://docs.python.org/%d.%d" % sys.version_info[:2]
        currentTheme=idleConf.CurrentTheme()
        self.flist = flist
        root = root or flist.root
        self.root = root
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = '>>> '
        # Build the toplevel window with its menu bar.
        self.menubar = Menu(root)
        self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
        if flist:
            self.tkinter_vars = flist.vars
            #self.top.instance_dict makes flist.inversedict available to
            #configDialog.py so it can access all EditorWindow instances
            self.top.instance_dict = flist.inversedict
        else:
            self.tkinter_vars = {} # keys: Tkinter event names
                                   # values: Tkinter variable instances
            self.top.instance_dict = {}
        self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
                                              'recent-files.lst')
        # Create the text widget (wrapped for multi-key bindings) and its
        # vertical scrollbar.
        self.text_frame = text_frame = Frame(top)
        self.vbar = vbar = Scrollbar(text_frame, name='vbar')
        self.width = idleConf.GetOption('main', 'EditorWindow',
                                        'width', type='int')
        text_options = {
                'name': 'text',
                'padx': 5,
                'wrap': 'none',
                'width': self.width,
                'height': idleConf.GetOption('main', 'EditorWindow',
                                             'height', type='int')}
        if TkVersion >= 8.5:
            # Starting with tk 8.5 we have to set the new tabstyle option
            # to 'wordprocessor' to achieve the same display of tabs as in
            # older tk versions.
            text_options['tabstyle'] = 'wordprocessor'
        self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
        self.top.focused_widget = self.text
        self.createmenubar()
        self.apply_bindings()
        self.top.protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<<close-window>>", self.close_event)
        if macosxSupport.runningAsOSXApp():
            # Command-W on editorwindows doesn't work without this.
            text.bind('<<close-window>>', self.close_event)
            # Some OS X systems have only one mouse button,
            # so use control-click for pulldown menus there.
            # (Note, AquaTk defines <2> as the right button if
            # present and the Tk Text widget already binds <2>.)
            text.bind("<Control-Button-1>",self.right_menu_event)
        else:
            # Elsewhere, use right-click for pulldown menus.
            text.bind("<3>",self.right_menu_event)
        # Wire the virtual editing events to their handler methods.
        text.bind("<<cut>>", self.cut)
        text.bind("<<copy>>", self.copy)
        text.bind("<<paste>>", self.paste)
        text.bind("<<center-insert>>", self.center_insert_event)
        text.bind("<<help>>", self.help_dialog)
        text.bind("<<python-docs>>", self.python_docs)
        text.bind("<<about-idle>>", self.about_dialog)
        text.bind("<<open-config-dialog>>", self.config_dialog)
        text.bind("<<open-module>>", self.open_module)
        text.bind("<<do-nothing>>", lambda event: "break")
        text.bind("<<select-all>>", self.select_all)
        text.bind("<<remove-selection>>", self.remove_selection)
        text.bind("<<find>>", self.find_event)
        text.bind("<<find-again>>", self.find_again_event)
        text.bind("<<find-in-files>>", self.find_in_files_event)
        text.bind("<<find-selection>>", self.find_selection_event)
        text.bind("<<replace>>", self.replace_event)
        text.bind("<<goto-line>>", self.goto_line_event)
        text.bind("<<smart-backspace>>",self.smart_backspace_event)
        text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
        text.bind("<<smart-indent>>",self.smart_indent_event)
        text.bind("<<indent-region>>",self.indent_region_event)
        text.bind("<<dedent-region>>",self.dedent_region_event)
        text.bind("<<comment-region>>",self.comment_region_event)
        text.bind("<<uncomment-region>>",self.uncomment_region_event)
        text.bind("<<tabify-region>>",self.tabify_region_event)
        text.bind("<<untabify-region>>",self.untabify_region_event)
        text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
        text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
        text.bind("<Left>", self.move_at_edge_if_selection(0))
        text.bind("<Right>", self.move_at_edge_if_selection(1))
        text.bind("<<del-word-left>>", self.del_word_left)
        text.bind("<<del-word-right>>", self.del_word_right)
        text.bind("<<beginning-of-line>>", self.home_callback)
        if flist:
            flist.inversedict[self] = key
            if key:
                flist.dict[key] = self
            text.bind("<<open-new-window>>", self.new_callback)
            text.bind("<<close-all-windows>>", self.flist.close_all_callback)
            text.bind("<<open-class-browser>>", self.open_class_browser)
            text.bind("<<open-path-browser>>", self.open_path_browser)
        self.set_status_bar()
        vbar['command'] = text.yview
        vbar.pack(side=RIGHT, fill=Y)
        text['yscrollcommand'] = vbar.set
        fontWeight = 'normal'
        if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
            fontWeight='bold'
        text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
                          idleConf.GetOption('main', 'EditorWindow',
                                             'font-size', type='int'),
                          fontWeight))
        text_frame.pack(side=LEFT, fill=BOTH, expand=1)
        text.pack(side=TOP, fill=BOTH, expand=1)
        text.focus_set()
        # usetabs true -> literal tab characters are used by indent and
        # dedent cmds, possibly mixed with spaces if
        # indentwidth is not a multiple of tabwidth,
        # which will cause Tabnanny to nag!
        # false -> tab characters are converted to spaces by indent
        # and dedent cmds, and ditto TAB keystrokes
        # Although use-spaces=0 can be configured manually in config-main.def,
        # configuration of tabs v. spaces is not supported in the configuration
        # dialog. IDLE promotes the preferred Python indentation: use spaces!
        usespaces = idleConf.GetOption('main', 'Indent',
                                       'use-spaces', type='bool')
        self.usetabs = not usespaces
        # tabwidth is the display width of a literal tab character.
        # CAUTION: telling Tk to use anything other than its default
        # tab setting causes it to use an entirely different tabbing algorithm,
        # treating tab stops as fixed distances from the left margin.
        # Nobody expects this, so for now tabwidth should never be changed.
        self.tabwidth = 8 # must remain 8 until Tk is fixed.
        # indentwidth is the number of screen characters per indent level.
        # The recommended Python indentation is four spaces.
        self.indentwidth = self.tabwidth
        self.set_notabs_indentwidth()
        # If context_use_ps1 is true, parsing searches back for a ps1 line;
        # else searches for a popular (if, def, ...) Python stmt.
        self.context_use_ps1 = False
        # When searching backwards for a reliable place to begin parsing,
        # first start num_context_lines[0] lines back, then
        # num_context_lines[1] lines back if that didn't work, and so on.
        # The last value should be huge (larger than the # of lines in a
        # conceivable file).
        # Making the initial values larger slows things down more often.
        self.num_context_lines = 50, 500, 5000000
        # Percolator/UndoDelegator give us filtered inserts and undo/redo.
        self.per = per = self.Percolator(text)
        self.undo = undo = self.UndoDelegator()
        per.insertfilter(undo)
        text.undo_block_start = undo.undo_block_start
        text.undo_block_stop = undo.undo_block_stop
        undo.set_saved_change_hook(self.saved_change_hook)
        # IOBinding implements file I/O and printing functionality
        self.io = io = self.IOBinding(self)
        io.set_filename_change_hook(self.filename_change_hook)
        self.good_load = False
        self.set_indentation_params(False)
        self.color = None # initialized below in self.ResetColorizer
        if filename:
            if os.path.exists(filename) and not os.path.isdir(filename):
                if io.loadfile(filename):
                    self.good_load = True
                    is_py_src = self.ispythonsource(filename)
                    self.set_indentation_params(is_py_src)
                    if is_py_src:
                        self.color = color = self.ColorDelegator()
                        per.insertfilter(color)
            else:
                io.set_filename(filename)
        self.ResetColorizer()
        self.saved_change_hook()
        self.update_recent_files_list()
        self.load_extensions()
        # Remember where the static part of the Windows menu ends so the
        # dynamic window list can be rebuilt after it.
        menu = self.menudict.get('windows')
        if menu:
            end = menu.index("end")
            if end is None:
                end = -1
            if end >= 0:
                menu.add_separator()
                end = end + 1
            self.wmenu_end = end
            WindowList.register_callback(self.postwindowsmenu)
        # Some abstractions so IDLE extensions are cross-IDE
        self.askyesno = tkMessageBox.askyesno
        self.askinteger = tkSimpleDialog.askinteger
        self.showerror = tkMessageBox.showerror
    def _filename_to_unicode(self, filename):
        """convert filename to unicode in order to display it in Tk"""
        if isinstance(filename, str) or not filename:
            return filename
        else:
            # bytes filename: try the filesystem encoding, then the IO
            # encoding, then latin-1 which always succeeds byte-for-byte.
            try:
                return filename.decode(self.filesystemencoding)
            except UnicodeDecodeError:
                # XXX
                try:
                    return filename.decode(self.encoding)
                except UnicodeDecodeError:
                    # byte-to-byte conversion
                    return filename.decode('iso8859-1')
    def new_callback(self, event):
        # Open a fresh, empty editor window in the current default directory.
        dirname, basename = self.io.defaultfilename()
        self.flist.new(dirname)
        return "break"
    def home_callback(self, event):
        """Smart Home: jump to first non-blank char (or after a Shell
        prompt), toggling to column 0 on a second press; Shift extends
        the selection."""
        if (event.state & 4) != 0 and event.keysym == "Home":
            # state&4==Control. If <Control-Home>, use the Tk binding.
            return
        if self.text.index("iomark") and \
           self.text.compare("iomark", "<=", "insert lineend") and \
           self.text.compare("insert linestart", "<=", "iomark"):
            # In Shell on input line, go to just after prompt
            insertpt = int(self.text.index("iomark").split(".")[1])
        else:
            line = self.text.get("insert linestart", "insert lineend")
            for insertpt in range(len(line)):
                if line[insertpt] not in (' ','\t'):
                    break
            else:
                insertpt=len(line)
            lineat = int(self.text.index("insert").split('.')[1])
            # Already at first non-blank: second press goes to column 0.
            if insertpt == lineat:
                insertpt = 0
        dest = "insert linestart+"+str(insertpt)+"c"
        if (event.state&1) == 0:
            # shift was not pressed
            self.text.tag_remove("sel", "1.0", "end")
        else:
            if not self.text.index("sel.first"):
                # there was no previous selection
                self.text.mark_set("my_anchor", "insert")
            else:
                if self.text.compare(self.text.index("sel.first"), "<",
                                     self.text.index("insert")):
                    self.text.mark_set("my_anchor", "sel.first") # extend back
                else:
                    self.text.mark_set("my_anchor", "sel.last") # extend forward
            first = self.text.index(dest)
            last = self.text.index("my_anchor")
            if self.text.compare(first,">",last):
                first,last = last,first
            self.text.tag_remove("sel", "1.0", "end")
            self.text.tag_add("sel", first, last)
        self.text.mark_set("insert", dest)
        self.text.see("insert")
        return "break"
    def set_status_bar(self):
        # Build the Ln/Col status bar and refresh it after every key or
        # mouse-button release and once when the window becomes idle.
        self.status_bar = self.MultiStatusBar(self.top)
        if macosxSupport.runningAsOSXApp():
            # Insert some padding to avoid obscuring some of the statusbar
            # by the resize widget.
            self.status_bar.set_label('_padding1', ' ', side=RIGHT)
        self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
        self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
        self.status_bar.pack(side=BOTTOM, fill=X)
        self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
        self.text.event_add("<<set-line-and-column>>",
                            "<KeyRelease>", "<ButtonRelease>")
        self.text.after_idle(self.set_line_and_column)
    def set_line_and_column(self, event=None):
        # Refresh the Ln/Col indicators from the insertion cursor position.
        line, column = self.text.index(INSERT).split('.')
        self.status_bar.set_label('column', 'Col: %s' % column)
        self.status_bar.set_label('line', 'Ln: %s' % line)
    # (internal name, label) pairs for the menu bar; the underscore marks
    # the keyboard mnemonic character (consumed by prepstr).
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("format", "F_ormat"),
        ("run", "_Run"),
        ("options", "_Options"),
        ("windows", "_Windows"),
        ("help", "_Help"),
    ]
    if macosxSupport.runningAsOSXApp():
        # On OS X drop the Options menu and use the native "Window" title.
        del menu_specs[-3]
        menu_specs[-2] = ("windows", "_Window")
    def createmenubar(self):
        # Create the cascading menus declared in menu_specs, fill them from
        # the Bindings definitions, and splice in the Recent Files submenu.
        mbar = self.menubar
        self.menudict = menudict = {}
        for name, label in self.menu_specs:
            underline, label = prepstr(label)
            menudict[name] = menu = Menu(mbar, name=name)
            mbar.add_cascade(label=label, menu=menu, underline=underline)
        if macosxSupport.isCarbonAquaTk(self.root):
            # Insert the application menu
            menudict['application'] = menu = Menu(mbar, name='apple')
            mbar.add_cascade(label='IDLE', menu=menu)
        self.fill_menus()
        self.recent_files_menu = Menu(self.menubar)
        self.menudict['file'].insert_cascade(3, label='Recent Files',
                                             underline=0,
                                             menu=self.recent_files_menu)
        self.base_helpmenu_length = self.menudict['help'].index(END)
        self.reset_help_menu_entries()
    def postwindowsmenu(self):
        # Only called when Windows menu exists.
        # Rebuild the dynamic window list that follows the static entries
        # (everything past self.wmenu_end).
        menu = self.menudict['windows']
        end = menu.index("end")
        if end is None:
            end = -1
        if end > self.wmenu_end:
            menu.delete(self.wmenu_end+1, end)
        WindowList.add_windows_to_menu(menu)
    # Context (right-click) menu; created lazily by make_rmenu().
    rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for label, eventname, verify_state in self.rmenu_specs:
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for label, eventname, _ in self.rmenu_specs:
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
    # Cut is enabled exactly when Copy is (i.e. a selection exists).
    return self.rmenu_check_copy()
def rmenu_check_copy(self):
    """Return the context-menu state for Copy: 'normal' when a
    selection exists, else 'disabled'."""
    try:
        # Raises TclError when no 'sel' range is set in the widget.
        indx = self.text.index('sel.first')
    except TclError:
        return 'disabled'
    else:
        return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
    """Return the context-menu state for Paste: 'normal' when the
    system CLIPBOARD selection holds data, else 'disabled'."""
    try:
        # tk::GetSelection raises TclError when the clipboard is empty
        # or the selection is unavailable.
        self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
    except TclError:
        return 'disabled'
    else:
        return 'normal'
def about_dialog(self, event=None):
aboutDialog.AboutDialog(self.top,'About IDLE')
def config_dialog(self, event=None):
configDialog.ConfigDialog(self.top,'Settings')
def help_dialog(self, event=None):
if self.root:
parent = self.root
else:
parent = self.top
helpDialog.display(parent, near=self.top)
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
def move_at_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
self_text_index = self.text.index
self_text_mark_set = self.text.mark_set
edges_table = ("sel.first+1c", "sel.last-1c")
def move_at_edge(event):
if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
try:
self_text_index("sel.first")
self_text_mark_set("insert", edges_table[edge_index])
except TclError:
pass
return move_at_edge
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
SearchDialog.find(self.text)
return "break"
def find_again_event(self, event):
SearchDialog.find_again(self.text)
return "break"
def find_selection_event(self, event):
SearchDialog.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
GrepDialog.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
ReplaceDialog.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
(f, file, (suffix, mode, type)) = _find_module(name)
except (NameError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if type != imp.PY_SOURCE:
tkMessageBox.showerror("Unsupported type",
"%s is not a source module" % name, parent=self.text)
return
if f:
f.close()
if self.flist:
self.flist.open(file)
else:
self.io.loadfile(file)
def open_class_browser(self, event=None):
filename = self.io.filename
if not filename:
tkMessageBox.showerror(
"No filename",
"This buffer has no associated filename",
master=self.text)
self.text.focus_set()
return None
head, tail = os.path.split(filename)
base, ext = os.path.splitext(tail)
from idlelib import ClassBrowser
ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
from idlelib import PathBrowser
PathBrowser.PathBrowser(self.flist)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
    """Guess whether this buffer holds Python source.

    True for an empty filename or a directory (new buffers default to
    Python), for .py/.pyw extensions, or when the buffer's first line
    is a shebang mentioning 'python'.
    """
    if not filename or os.path.isdir(filename):
        return True
    base, ext = os.path.splitext(os.path.basename(filename))
    if os.path.normcase(ext) in (".py", ".pyw"):
        return True
    # Fall back to inspecting the first line of the buffer text.
    line = self.text.get('1.0', '1.0 lineend')
    return line.startswith('#!') and 'python' in line
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the colour theme"
# Called from self.filename_change_hook and from configDialog.py
self._rmcolorizer()
self._addcolorizer()
theme = idleConf.GetOption('main','Theme','name')
normal_colors = idleConf.GetHighlight(theme, 'normal')
cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
select_colors = idleConf.GetHighlight(theme, 'hilite')
self.text.config(
foreground=normal_colors['foreground'],
background=normal_colors['background'],
insertbackground=cursor_color,
selectforeground=select_colors['foreground'],
selectbackground=select_colors['background'],
)
IDENTCHARS = string.ascii_letters + string.digits + "_"
def colorize_syntax_error(self, text, pos):
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in self.IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
fontWeight='normal'
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
idleConf.GetOption('main','EditorWindow','font-size',
type='int'),
fontWeight))
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.Bindings.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict:
menu = self.menudict[menubarItem]
end = menu.index(END) + 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menubarItem in menuEventDict:
if itemName in menuEventDict[menubarItem]:
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
helpfile = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
try:
os.startfile(helpfile)
except WindowsError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
    """Load and update the recent files list and menus.

    Reads the persisted list (one absolute path per line), optionally
    promotes *new_file* to the top, drops stale entries, rewrites the
    file, and rebuilds the Recent Files menu of every open window.
    """
    rf_list = []
    if os.path.exists(self.recent_files_path):
        # 'with' guarantees the file is closed even if readlines()
        # fails, matching the write path below.
        with open(self.recent_files_path, 'r',
                  encoding='utf_8', errors='replace') as rf_list_file:
            rf_list = rf_list_file.readlines()
    if new_file:
        new_file = os.path.abspath(new_file) + '\n'
        if new_file in rf_list:
            rf_list.remove(new_file)  # move to top
        rf_list.insert(0, new_file)
    # Drop entries containing NUL bytes or whose file no longer exists
    # (path[0:-1] strips the trailing newline before the check).
    rf_list = [path for path in rf_list
               if '\0' not in path and os.path.exists(path[0:-1])]
    # Menu entries get single-character underlines, so the list is
    # capped at len(ulchars) items.
    ulchars = "1234567890ABCDEFGHIJK"
    rf_list = rf_list[0:len(ulchars)]
    try:
        with open(self.recent_files_path, 'w',
                  encoding='utf_8', errors='replace') as rf_file:
            rf_file.writelines(rf_list)
    except IOError as err:
        # Warn only once per session to avoid a dialog storm.
        if not getattr(self.root, "recentfilelist_error_displayed", False):
            self.root.recentfilelist_error_displayed = True
            tkMessageBox.showerror(title='IDLE Error',
                message='Unable to update Recent Files list:\n%s'
                    % str(err),
                parent=self.text)
    # for each edit window instance, construct the recent files menu
    for instance in self.top.instance_dict:
        menu = instance.recent_files_menu
        menu.delete(0, END)  # clear, and rebuild:
        for i, file_name in enumerate(rf_list):
            file_name = file_name.rstrip()  # zap \n
            # make unicode string to display non-ASCII chars correctly
            ufile_name = self._filename_to_unicode(file_name)
            callback = instance.__recent_file_callback(file_name)
            menu.add_command(label=ulchars[i] + " " + ufile_name,
                             command=callback,
                             underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
    # Basename of the current file, or None for an unsaved buffer.
    filename = self.io.filename
    if filename:
        filename = os.path.basename(filename)
    # return unicode string to display non-ASCII chars correctly
    return self._filename_to_unicode(filename)

def long_title(self):
    # Full path of the current file, or "" for an unsaved buffer.
    # return unicode string to display non-ASCII chars correctly
    return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
    """Return (top, bot): first and last buffer line numbers currently
    visible in the text widget."""
    text = self.text
    # "@x,y" indices map pixel coordinates to buffer positions; 65535
    # is safely below any real window height.
    top = self.getlineno("@0,0")
    bot = self.getlineno("@0,65535")
    if top == bot and text.winfo_height() == 1:
        # Geometry manager hasn't run yet
        height = int(text['height'])
        bot = top + height - 1
    return top, bot
def getlineno(self, mark="insert"):
    # Line number (int) of the given Tk text mark/index; "line.col"
    # parses as a float whose integer part is the line.
    text = self.text
    return int(float(text.index(mark)))
def get_geometry(self):
    "Return (width, height, x, y)"
    geom = self.top.wm_geometry()
    # wm_geometry() yields "WxH+X+Y"; the offsets may be negative.
    m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
    return list(map(int, m.groups()))
def close_event(self, event):
self.close()
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
WindowList.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close(False)
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in list(self.extensions.values()):
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print("Failed to load extension", repr(name))
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
try:
mod = __import__(name, globals(), locals(), [])
except ImportError:
print("\nFailed to import extension: ", name)
raise
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs:
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.Bindings.menudefs
if keydefs is None:
keydefs = self.Bindings.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
raise NameError(name)
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
raise NameError(name)
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tk_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tk_tabwidth(self, newtabwidth):
text = self.text
if self.get_tk_tabwidth() != newtabwidth:
# Set text widget tab width
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
### begin autoindent code ### (configuration was moved to beginning of class)
def set_indentation_params(self, is_py_src, guess=True):
if is_py_src and guess:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tk_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
if self.context_use_ps1:
last_line_of_prompt = sys.ps1.split('\n')[-1]
else:
last_line_of_prompt = ''
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# strip whitespace before insert point unless it's in the prompt
i = 0
last_line_of_prompt = sys.ps1.split('\n')[-1]
while line and line[-1] in " \t" and line != last_line_of_prompt:
line = line[:-1]
i = i+1
if i:
text.delete("insert - %d chars" % i, "insert")
# strip whitespace after insert point
while text.get("insert") in " \t":
text.delete("insert")
# start new line
text.insert("insert", '\n')
# adjust indentation for continuations and block
# open/close first need to find the last stmt
lno = index2line(text.index('insert'))
y = PyParse.Parser(self.indentwidth, self.tabwidth)
if not self.context_use_ps1:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
bod = y.find_good_parse_start(
self.context_use_ps1,
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != PyParse.C_NONE:
# The current stmt hasn't ended yet.
if c == PyParse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == PyParse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
text.insert("insert", indent)
elif c == PyParse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
self.reindent_to(y.compute_bracket_indent())
elif c == PyParse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
    """Return an offset-based adapter around self.is_char_in_string.

    The returned callable maps a character offset (relative to
    *startindex*) onto a Tk index and forwards the query.
    """
    is_char_in_string = self.is_char_in_string
    def inner(offset, _anchor=startindex, _icis=is_char_in_string):
        # Default arguments freeze the anchor and the bound method at
        # definition time, as the original closure did.
        return _icis("%s+%dc" % (_anchor, offset))
    return inner
def indent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = effective + self.indentwidth
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def dedent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = max(effective - self.indentwidth, 0)
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def comment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines) - 1):
line = lines[pos]
lines[pos] = '##' + line
self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if not line:
continue
if line[:2] == '##':
line = line[2:]
elif line[:1] == '#':
line = line[1:]
lines[pos] = line
self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, tabwidth)
ntabs, nspaces = divmod(effective, tabwidth)
lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
for pos in range(len(lines)):
lines[pos] = lines[pos].expandtabs(tabwidth)
self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
if self.askyesno(
"Toggle tabs",
"Turn tabs " + ("on", "off")[self.usetabs] +
"?\nIndent width " +
("will be", "remains at")[self.usetabs] + " 8." +
"\n Note: a tab is always 8 columns",
parent=self.text):
self.usetabs = not self.usetabs
# Try to prevent inconsistent indentation.
# User must change indent width manually after using tabs.
self.indentwidth = 8
return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
new = self.askinteger(
"Indent width",
"New indent width (2-16)\n(Always use 8 when using tabs)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
if new and new != self.indentwidth and not self.usetabs:
self.indentwidth = new
return "break"
def get_region(self):
text = self.text
first, last = self.get_selection_indices()
if first and last:
head = text.index(first + " linestart")
tail = text.index(last + "-1c lineend +1c")
else:
head = text.index("insert linestart")
tail = text.index("insert lineend +1c")
chars = text.get(head, tail)
lines = chars.split("\n")
return head, tail, chars, lines
def set_region(self, head, tail, chars, lines):
text = self.text
newchars = "\n".join(lines)
if newchars == chars:
text.bell()
return
text.tag_remove("sel", "1.0", "end")
text.mark_set("insert", head)
text.undo_block_start()
text.delete(head, tail)
text.insert(head, newchars)
text.undo_block_stop()
text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
    """Return whitespace displaying as *n* columns of indentation,
    using tabs plus a space remainder when self.usetabs is set."""
    if not self.usetabs:
        return ' ' * n
    ntabs, nspaces = divmod(n, self.tabwidth)
    return '\t' * ntabs + ' ' * nspaces
# Delete from beginning of line to insert point, then reinsert
# column logical (meaning use tabs if appropriate) spaces.
def reindent_to(self, column):
    """Replace the whitespace left of the insert point with blanks
    displaying as *column* columns (tabs if self.usetabs)."""
    text = self.text
    # Group delete+insert into one undo unit.
    text.undo_block_start()
    if text.compare("insert linestart", "!=", "insert"):
        text.delete("insert linestart", "insert")
    if column:
        text.insert("insert", self._make_blanks(column))
    text.undo_block_stop()
def _asktabwidth(self):
    # Prompt for a tab width; fall back to the current tabwidth when
    # the dialog is cancelled (askinteger returns None).
    return self.askinteger(
        "Tab width",
        "Columns per tab? (2-16)",
        parent=self.text,
        initialvalue=self.indentwidth,
        minvalue=2,
        maxvalue=16) or self.tabwidth
# Guess indentwidth from text content.
# Return guessed indentwidth.  This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
def guess_indent(self):
    opener, indented = IndentSearcher(self.text, self.tabwidth).run()
    if opener and indented:
        # The guessed width is the indentation of the first indented
        # line minus that of the block-opener line above it.
        raw, indentsmall = classifyws(opener, self.tabwidth)
        raw, indentlarge = classifyws(indented, self.tabwidth)
    else:
        indentsmall = indentlarge = 0
    return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
    """Return the line component of a Tk "line.col" index as an int."""
    line, _sep, _col = str(index).partition('.')
    return int(line)
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
#              effective # of leading blanks after expanding
#              tabs to width tabwidth)
def classifyws(s, tabwidth):
    """Classify the indentation prefix of *s*.

    Returns (raw, effective): raw counts leading blank/tab characters;
    effective is the display width of that prefix once each tab is
    expanded to the next multiple of *tabwidth*.
    """
    raw = 0
    effective = 0
    for ch in s:
        if ch == '\t':
            raw += 1
            # Jump to the next tab stop.
            effective = (effective // tabwidth + 1) * tabwidth
        elif ch == ' ':
            raw += 1
            effective += 1
        else:
            # First non-whitespace character ends the prefix.
            break
    return raw, effective
# Tokenizer support for guess_indent()/IndentSearcher: alias the module
# as _tokenize and delete the public name so it does not leak into this
# module's namespace.
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
    """Scan a Text widget for the first block opener and the first
    indented statement after it, to infer the file's indent width."""

    # .run() chews over the Text widget, looking for a block opener
    # and the stmt following it. Returns a pair,
    #     (line containing block opener, line containing stmt)
    # Either or both may be None.

    def __init__(self, text, tabwidth):
        # text: Tk Text widget to scan; tabwidth: columns per tab for
        # the tokenizer.
        self.text = text
        self.tabwidth = tabwidth
        self.i = self.finished = 0
        self.blkopenline = self.indentedline = None

    def readline(self):
        # File-like readline over the widget, one buffer line per call;
        # returns "" at end of buffer or once a result has been found.
        if self.finished:
            return ""
        i = self.i = self.i + 1
        mark = repr(i) + ".0"
        if self.text.compare(mark, ">=", "end"):
            return ""
        return self.text.get(mark, mark + " lineend+1c")

    def tokeneater(self, type, token, start, end, line,
                   INDENT=_tokenize.INDENT,
                   NAME=_tokenize.NAME,
                   OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
        # Remember the latest block-opener line, then stop at the first
        # INDENT that follows one.
        if self.finished:
            pass
        elif type == NAME and token in OPENERS:
            self.blkopenline = line
        elif type == INDENT and self.blkopenline:
            self.indentedline = line
            self.finished = 1

    def run(self):
        # _tokenize.tabsize is module-global state, so save and restore
        # it around the scan.
        save_tabsize = _tokenize.tabsize
        _tokenize.tabsize = self.tabwidth
        try:
            try:
                tokens = _tokenize.generate_tokens(self.readline)
                for token in tokens:
                    self.tokeneater(*token)
            except (_tokenize.TokenError, SyntaxError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            _tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline

### end autoindent code ###
def prepstr(s):
    """Extract the menu-underline marker from *s*.

    prepstr("Co_py") -> (2, "Copy").  Returns (-1, s) unchanged when
    there is no underscore; the index is the underline position.
    """
    idx = s.find('_')
    if idx < 0:
        return -1, s
    return idx, s[:idx] + s[idx + 1:]
# Friendly display names for Tk keysyms used in menu accelerators.
keynames = {
 'bracketleft': '[',
 'bracketright': ']',
 'slash': '/',
}
def get_accelerator(keydefs, eventname):
    """Return a menu accelerator string (e.g. "Ctrl+Shift+S") for the
    first key binding of eventname, or "" if there is none."""
    keylist = keydefs.get(eventname)
    # issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
    # if not keylist:
    if (not keylist) or (macosxSupport.runningAsOSXApp() and eventname in {
                        "<<open-module>>",
                        "<<goto-line>>",
                        "<<change-indentwidth>>"}):
        return ""
    accel = keylist[0]
    # Upper-case bare letter keys, then translate keysym names to symbols.
    accel = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), accel)
    accel = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), accel)
    # Apply the remaining literal rewrites in the original order; order
    # matters ("Control-" must become "Ctrl-" before "-" becomes "+").
    for pattern, replacement in (
            ("Key-", ""),
            ("Cancel", "Ctrl-Break"),   # dscherer@cmu.edu
            ("Control-", "Ctrl-"),
            ("-", "+"),
            ("><", " "),
            ("<", ""),
            (">", "")):
        accel = re.sub(pattern, replacement, accel)
    return accel
def fixwordbreaks(root):
    """Configure Tk so word-wise operations treat identifiers as words."""
    # Make sure that Tk's double-click and next/previous word
    # operations use our definition of a word (i.e. an identifier)
    tk = root.tk
    tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
    tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
    tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def test():
    """Manual smoke test: open an EditorWindow on sys.argv[1] (or an
    unnamed buffer) and run the Tk main loop until it is closed."""
    root = Tk()
    fixwordbreaks(root)
    # Hide the default root window; EditorWindow creates its own toplevel.
    root.withdraw()
    if sys.argv[1:]:
        filename = sys.argv[1]
    else:
        filename = None
    edit = EditorWindow(root=root, filename=filename)
    edit.set_close_hook(root.quit)
    edit.text.bind("<<close-all-windows>>", edit.close_event)
    root.mainloop()
    root.destroy()
if __name__ == '__main__':
    test()
| LaoZhongGu/kbengine | kbe/src/lib/python/Lib/idlelib/EditorWindow.py | Python | lgpl-3.0 | 65,343 |
# types.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatibility namespace for sqlalchemy.sql.types.
"""
# Public names re-exported from sqlalchemy.sql for backward compatibility.
# NOTE(review): some names imported below (e.g. MatchType, NullType,
# Variant) are deliberately not listed here — confirm against upstream
# before adding them.
__all__ = [
    "TypeEngine",
    "TypeDecorator",
    "UserDefinedType",
    "INT",
    "CHAR",
    "VARCHAR",
    "NCHAR",
    "NVARCHAR",
    "TEXT",
    "Text",
    "FLOAT",
    "NUMERIC",
    "REAL",
    "DECIMAL",
    "TIMESTAMP",
    "DATETIME",
    "CLOB",
    "BLOB",
    "BINARY",
    "VARBINARY",
    "BOOLEAN",
    "BIGINT",
    "SMALLINT",
    "INTEGER",
    "DATE",
    "TIME",
    "String",
    "Integer",
    "SmallInteger",
    "BigInteger",
    "Numeric",
    "Float",
    "DateTime",
    "Date",
    "Time",
    "LargeBinary",
    "Boolean",
    "Unicode",
    "Concatenable",
    "UnicodeText",
    "PickleType",
    "Interval",
    "Enum",
    "Indexable",
    "ARRAY",
    "JSON",
]
from .sql.sqltypes import _Binary # noqa
from .sql.sqltypes import ARRAY # noqa
from .sql.sqltypes import BIGINT # noqa
from .sql.sqltypes import BigInteger # noqa
from .sql.sqltypes import BINARY # noqa
from .sql.sqltypes import BLOB # noqa
from .sql.sqltypes import BOOLEAN # noqa
from .sql.sqltypes import Boolean # noqa
from .sql.sqltypes import CHAR # noqa
from .sql.sqltypes import CLOB # noqa
from .sql.sqltypes import Concatenable # noqa
from .sql.sqltypes import DATE # noqa
from .sql.sqltypes import Date # noqa
from .sql.sqltypes import DATETIME # noqa
from .sql.sqltypes import DateTime # noqa
from .sql.sqltypes import DECIMAL # noqa
from .sql.sqltypes import Enum # noqa
from .sql.sqltypes import FLOAT # noqa
from .sql.sqltypes import Float # noqa
from .sql.sqltypes import Indexable # noqa
from .sql.sqltypes import INT # noqa
from .sql.sqltypes import INTEGER # noqa
from .sql.sqltypes import Integer # noqa
from .sql.sqltypes import Interval # noqa
from .sql.sqltypes import JSON # noqa
from .sql.sqltypes import LargeBinary # noqa
from .sql.sqltypes import MatchType # noqa
from .sql.sqltypes import NCHAR # noqa
from .sql.sqltypes import NULLTYPE # noqa
from .sql.sqltypes import NullType # noqa
from .sql.sqltypes import NUMERIC # noqa
from .sql.sqltypes import Numeric # noqa
from .sql.sqltypes import NVARCHAR # noqa
from .sql.sqltypes import PickleType # noqa
from .sql.sqltypes import REAL # noqa
from .sql.sqltypes import SchemaType # noqa
from .sql.sqltypes import SMALLINT # noqa
from .sql.sqltypes import SmallInteger # noqa
from .sql.sqltypes import String # noqa
from .sql.sqltypes import STRINGTYPE # noqa
from .sql.sqltypes import TEXT # noqa
from .sql.sqltypes import Text # noqa
from .sql.sqltypes import TIME # noqa
from .sql.sqltypes import Time # noqa
from .sql.sqltypes import TIMESTAMP # noqa
from .sql.sqltypes import Unicode # noqa
from .sql.sqltypes import UnicodeText # noqa
from .sql.sqltypes import VARBINARY # noqa
from .sql.sqltypes import VARCHAR # noqa
from .sql.type_api import adapt_type # noqa
from .sql.type_api import to_instance # noqa
from .sql.type_api import TypeDecorator # noqa
from .sql.type_api import TypeEngine # noqa
from .sql.type_api import UserDefinedType # noqa
from .sql.type_api import Variant # noqa
| graingert/sqlalchemy | lib/sqlalchemy/types.py | Python | mit | 3,322 |
from ansible.plugins.callback import CallbackBase
class PlaybookCallback(CallbackBase):
    """Ansible playbook callback that records every task result,
    successful or failed, in self.results for later inspection."""
    def __init__(self):
        super(PlaybookCallback, self).__init__()
        # store all results
        self.results = []
    def v2_runner_on_ok(self, result):
        """Save ok result"""
        self.results.append(result)
    def v2_runner_on_failed(self, result, **kwargs):
        """Save failed result"""
        self.results.append(result)
| agharibi/linchpin | linchpin/api/callbacks.py | Python | gpl-3.0 | 480 |
from mock import MagicMock
import mock
from django.test import override_settings
from tests.utilities.utils import SafeTestCase
from tests.utilities.ldap import get_ldap_user_defaults
from accounts.models import (
User,
AccountRequest,
Intent
)
from projects.models import Project
from projects.receivers import check_general_eligibility
# Per-organization settings injected via @override_settings in the tests
# below: 'suffix' is appended to usernames for non-UCB schools, and
# 'general_project_id' names the catch-all project new users may join.
organization_info = {
    'ucb': {
        'long_name': 'University of Colorado Boulder',
        'suffix': None,
        'general_project_id': 'ucb-general'
    },
    'csu': {
        'long_name': 'Colorado State University',
        'suffix': 'colostate.edu',
        'general_project_id': 'csu-general'
    }
}
@override_settings(ORGANIZATION_INFO=organization_info)
class GeneralEligibilityReceiverTestCase(SafeTestCase):
    """Tests for the check_general_eligibility signal receiver: it should
    add a newly requested account's user to the organization's general
    project, regardless of declared Summit intent."""
    def test_check_general_eligibility(self):
        """User with a matching AccountRequest joins ucb-general whether or
        not a Summit intent was declared."""
        user_defaults = get_ldap_user_defaults()
        auth_user_defaults = dict(
            username=user_defaults['username'],
            first_name=user_defaults['first_name'],
            last_name=user_defaults['last_name'],
            email=user_defaults['email']
        )
        auth_user = User.objects.create(**auth_user_defaults)
        account_request_defaults = dict(
            username=auth_user.username,
            first_name=auth_user.first_name,
            last_name=auth_user.last_name,
            email=auth_user.email,
            organization='ucb'
        )
        account_request = AccountRequest.objects.create(**account_request_defaults)
        intent = Intent.objects.create(
            account_request=account_request,
            reason_summit=True
        )
        project_defaults = dict(
            pi_emails=['pi@email.org'],
            description='test project',
            organization='ucb',
            title='test project',
            project_id='ucb-general'
        )
        project = Project.objects.create(**project_defaults)
        check_general_eligibility(account_request.__class__,account_request=account_request)
        project = Project.objects.get()
        self.assertIn(auth_user,project.collaborators.all())
        # No Summit intention declared, now add to 'general' account anyway
        project.collaborators.clear()
        intent.reason_summit = False
        intent.save()
        check_general_eligibility(account_request.__class__,account_request=account_request)
        project = Project.objects.get()
        self.assertIn(auth_user,project.collaborators.all())
    def test_check_general_eligibility_suffixed(self):
        """CSU users authenticate with a '<username>@colostate.edu'
        effective uid; the receiver must still match them to the request."""
        user_defaults = get_ldap_user_defaults()
        effective_uid = '{}@colostate.edu'.format(user_defaults['username'])
        auth_user_defaults = dict(
            username=effective_uid,
            first_name=user_defaults['first_name'],
            last_name=user_defaults['last_name'],
            email=user_defaults['email']
        )
        auth_user = User.objects.create(**auth_user_defaults)
        account_request_defaults = dict(
            username=user_defaults['username'],
            first_name=auth_user.first_name,
            last_name=auth_user.last_name,
            email=auth_user.email,
            organization='csu'
        )
        account_request = AccountRequest.objects.create(**account_request_defaults)
        # Created for its DB side effect; the receiver reads it indirectly.
        intent = Intent.objects.create(
            account_request=account_request,
            reason_summit=True
        )
        project_defaults = dict(
            pi_emails=['pi@email.org'],
            description='test project',
            organization='csu',
            title='test project',
            project_id='csu-general'
        )
        project = Project.objects.create(**project_defaults)
        check_general_eligibility(account_request.__class__,account_request=account_request)
        project = Project.objects.get()
        self.assertIn(auth_user,project.collaborators.all())
    def test_check_general_eligibility_no_intent(self):
        """Even with no Intent row at all, the user is added to general."""
        user_defaults = get_ldap_user_defaults()
        auth_user_defaults = dict(
            username=user_defaults['username'],
            first_name=user_defaults['first_name'],
            last_name=user_defaults['last_name'],
            email=user_defaults['email']
        )
        auth_user = User.objects.create(**auth_user_defaults)
        account_request_defaults = dict(
            username=auth_user.username,
            first_name=auth_user.first_name,
            last_name=auth_user.last_name,
            email=auth_user.email,
            organization='ucb'
        )
        account_request = AccountRequest.objects.create(**account_request_defaults)
        check_general_eligibility(account_request.__class__,account_request=account_request)
| ResearchComputing/RCAMP | rcamp/tests/test_projects_receivers.py | Python | mit | 4,732 |
from . import product_config
from . import product_attribute
from . import product
| pledra/odoo-product-configurator | product_configurator/models/__init__.py | Python | agpl-3.0 | 83 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDeoptim(RPackage):
    """Implements the differential evolution algorithm for global optimization
    of a real-valued function of a real-valued parameter vector."""
    # CRAN locations Spack uses to fetch releases and list older versions.
    homepage = "https://cloud.r-project.org/package=DEoptim"
    url      = "https://cloud.r-project.org/src/contrib/DEoptim_2.2-3.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/DEoptim"
    version('2.2-4', sha256='0a547784090d1e9b93efc53768110621f35bed3692864f6ce5c0dda2ebd6d482')
    version('2.2-3', sha256='af2120feea3a736ee7a5a93c6767d464abc0d45ce75568074b233405e73c9a5d')
| rspavel/spack | var/spack/repos/builtin/packages/r-deoptim/package.py | Python | lgpl-2.1 | 800 |
"""
Tests for Discussion API serializers
"""
import itertools
from urlparse import urlparse
import ddt
import httpretty
import mock
from nose.plugins.attrib import attr
from django.test.client import RequestFactory
from discussion_api.serializers import CommentSerializer, ThreadSerializer, get_context
from discussion_api.tests.utils import (
CommentsServiceMockMixin,
make_minimal_cs_thread,
make_minimal_cs_comment,
)
from django_comment_common.models import (
FORUM_ROLE_ADMINISTRATOR,
FORUM_ROLE_COMMUNITY_TA,
FORUM_ROLE_MODERATOR,
FORUM_ROLE_STUDENT,
Role,
)
from lms.lib.comment_client.comment import Comment
from lms.lib.comment_client.thread import Thread
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
@ddt.ddt
class SerializerTestMixin(CommentsServiceMockMixin, UrlResetMixin):
    """Shared setup plus anonymity/label/flag/vote tests for thread and
    comment serializers; subclasses provide make_cs_content() and
    serialize()."""
    @classmethod
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUpClass(cls):
        super(SerializerTestMixin, cls).setUpClass()
        cls.course = CourseFactory.create()
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        """Stub the comments service with httpretty and build a request
        authenticated as a fresh user."""
        super(SerializerTestMixin, self).setUp()
        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)
        self.maxDiff = None  # pylint: disable=invalid-name
        self.user = UserFactory.create()
        self.register_get_user_response(self.user)
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
        self.author = UserFactory.create()
    def create_role(self, role_name, users, course=None):
        """Create a Role in self.course with the given name and users"""
        course = course or self.course
        role = Role.objects.create(name=role_name, course_id=course.id)
        role.users = users
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, True, False, True),
        (FORUM_ROLE_ADMINISTRATOR, False, True, False),
        (FORUM_ROLE_MODERATOR, True, False, True),
        (FORUM_ROLE_MODERATOR, False, True, False),
        (FORUM_ROLE_COMMUNITY_TA, True, False, True),
        (FORUM_ROLE_COMMUNITY_TA, False, True, False),
        (FORUM_ROLE_STUDENT, True, False, True),
        (FORUM_ROLE_STUDENT, False, True, True),
    )
    @ddt.unpack
    def test_anonymity(self, role_name, anonymous, anonymous_to_peers, expected_serialized_anonymous):
        """
        Test that content is properly made anonymous.
        Content should be anonymous iff the anonymous field is true or the
        anonymous_to_peers field is true and the requester does not have a
        privileged role.
        role_name is the name of the requester's role.
        anonymous is the value of the anonymous field in the content.
        anonymous_to_peers is the value of the anonymous_to_peers field in the
        content.
        expected_serialized_anonymous is whether the content should actually be
        anonymous in the API output when requested by a user with the given
        role.
        """
        self.create_role(role_name, [self.user])
        serialized = self.serialize(
            self.make_cs_content({"anonymous": anonymous, "anonymous_to_peers": anonymous_to_peers})
        )
        # Anonymity is expressed by a null author in the serialized output.
        actual_serialized_anonymous = serialized["author"] is None
        self.assertEqual(actual_serialized_anonymous, expected_serialized_anonymous)
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, False, "Staff"),
        (FORUM_ROLE_ADMINISTRATOR, True, None),
        (FORUM_ROLE_MODERATOR, False, "Staff"),
        (FORUM_ROLE_MODERATOR, True, None),
        (FORUM_ROLE_COMMUNITY_TA, False, "Community TA"),
        (FORUM_ROLE_COMMUNITY_TA, True, None),
        (FORUM_ROLE_STUDENT, False, None),
        (FORUM_ROLE_STUDENT, True, None),
    )
    @ddt.unpack
    def test_author_labels(self, role_name, anonymous, expected_label):
        """
        Test correctness of the author_label field.
        The label should be "Staff", "Staff", or "Community TA" for the
        Administrator, Moderator, and Community TA roles, respectively, but
        the label should not be present if the content is anonymous.
        role_name is the name of the author's role.
        anonymous is the value of the anonymous field in the content.
        expected_label is the expected value of the author_label field in the
        API output.
        """
        self.create_role(role_name, [self.author])
        serialized = self.serialize(self.make_cs_content({"anonymous": anonymous}))
        self.assertEqual(serialized["author_label"], expected_label)
    def test_abuse_flagged(self):
        """Content the requester has flagged serializes abuse_flagged=True."""
        serialized = self.serialize(self.make_cs_content({"abuse_flaggers": [str(self.user.id)]}))
        self.assertEqual(serialized["abuse_flagged"], True)
    def test_voted(self):
        """Content the requester has upvoted serializes voted=True."""
        thread_id = "test_thread"
        self.register_get_user_response(self.user, upvoted_ids=[thread_id])
        serialized = self.serialize(self.make_cs_content({"id": thread_id}))
        self.assertEqual(serialized["voted"], True)
@attr(shard=3)
@ddt.ddt
class ThreadSerializerSerializationTest(SerializerTestMixin, SharedModuleStoreTestCase):
    """Tests for ThreadSerializer serialization."""
    def make_cs_content(self, overrides):
        """
        Create a thread with the given overrides, plus some useful test data.
        """
        merged_overrides = {
            "course_id": unicode(self.course.id),
            "user_id": str(self.author.id),
            "username": self.author.username,
            "read": True,
            "endorsed": True,
            "resp_total": 0,
        }
        merged_overrides.update(overrides)
        return make_minimal_cs_thread(merged_overrides)
    def serialize(self, thread):
        """
        Create a serializer with an appropriate context and use it to serialize
        the given thread, returning the result.
        """
        return ThreadSerializer(thread, context=get_context(self.course, self.request)).data
    def test_basic(self):
        """Full field-by-field check for both discussion and question threads."""
        thread = {
            "type": "thread",
            "id": "test_thread",
            "course_id": unicode(self.course.id),
            "commentable_id": "test_topic",
            "group_id": None,
            "user_id": str(self.author.id),
            "username": self.author.username,
            "anonymous": False,
            "anonymous_to_peers": False,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "thread_type": "discussion",
            "title": "Test Title",
            "body": "Test body",
            "pinned": True,
            "closed": False,
            "abuse_flaggers": [],
            "votes": {"up_count": 4},
            "comments_count": 5,
            "unread_comments_count": 3,
            "read": False,
            "endorsed": False,
            "response_count": None,
        }
        expected = {
            "id": "test_thread",
            "course_id": unicode(self.course.id),
            "topic_id": "test_topic",
            "group_id": None,
            "group_name": None,
            "author": self.author.username,
            "author_label": None,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "type": "discussion",
            "title": "Test Title",
            "raw_body": "Test body",
            "rendered_body": "<p>Test body</p>",
            "pinned": True,
            "closed": False,
            "following": False,
            "abuse_flagged": False,
            "voted": False,
            "vote_count": 4,
            "comment_count": 6,
            "unread_comment_count": 4,
            "comment_list_url": "http://testserver/api/discussion/v1/comments/?thread_id=test_thread",
            "endorsed_comment_list_url": None,
            "non_endorsed_comment_list_url": None,
            "editable_fields": ["abuse_flagged", "following", "read", "voted"],
            "read": False,
            "has_endorsed": False,
        }
        self.assertEqual(self.serialize(thread), expected)
        # Question threads expose endorsed/non-endorsed comment URLs instead
        # of a single comment list URL.
        thread["thread_type"] = "question"
        expected.update({
            "type": "question",
            "comment_list_url": None,
            "endorsed_comment_list_url": (
                "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=True"
            ),
            "non_endorsed_comment_list_url": (
                "http://testserver/api/discussion/v1/comments/?thread_id=test_thread&endorsed=False"
            ),
        })
        self.assertEqual(self.serialize(thread), expected)
    def test_pinned_missing(self):
        """
        Make sure that older threads in the comments service without the pinned
        field do not break serialization
        """
        thread_data = self.make_cs_content({})
        del thread_data["pinned"]
        self.register_get_thread_response(thread_data)
        serialized = self.serialize(thread_data)
        self.assertEqual(serialized["pinned"], False)
    def test_group(self):
        """A thread's cohort group id resolves to the cohort's name."""
        cohort = CohortFactory.create(course_id=self.course.id)
        serialized = self.serialize(self.make_cs_content({"group_id": cohort.id}))
        self.assertEqual(serialized["group_id"], cohort.id)
        self.assertEqual(serialized["group_name"], cohort.name)
    def test_following(self):
        """A thread the requester subscribes to serializes following=True."""
        thread_id = "test_thread"
        self.register_get_user_response(self.user, subscribed_thread_ids=[thread_id])
        serialized = self.serialize(self.make_cs_content({"id": thread_id}))
        self.assertEqual(serialized["following"], True)
    def test_response_count(self):
        """resp_total from the comments service maps to response_count."""
        thread_data = self.make_cs_content({"resp_total": 2})
        self.register_get_thread_response(thread_data)
        serialized = self.serialize(thread_data)
        self.assertEqual(serialized["response_count"], 2)
    def test_response_count_missing(self):
        """response_count is omitted when resp_total is absent upstream."""
        thread_data = self.make_cs_content({})
        del thread_data["resp_total"]
        self.register_get_thread_response(thread_data)
        serialized = self.serialize(thread_data)
        self.assertNotIn("response_count", serialized)
@ddt.ddt
class CommentSerializerTest(SerializerTestMixin, SharedModuleStoreTestCase):
    """Tests for CommentSerializer."""
    def setUp(self):
        super(CommentSerializerTest, self).setUp()
        self.endorser = UserFactory.create()
        self.endorsed_at = "2015-05-18T12:34:56Z"
    def make_cs_content(self, overrides=None, with_endorsement=False):
        """
        Create a comment with the given overrides, plus some useful test data.
        """
        merged_overrides = {
            "user_id": str(self.author.id),
            "username": self.author.username
        }
        if with_endorsement:
            merged_overrides["endorsement"] = {
                "user_id": str(self.endorser.id),
                "time": self.endorsed_at
            }
        merged_overrides.update(overrides or {})
        return make_minimal_cs_comment(merged_overrides)
    def serialize(self, comment, thread_data=None):
        """
        Create a serializer with an appropriate context and use it to serialize
        the given comment, returning the result.
        """
        context = get_context(self.course, self.request, make_minimal_cs_thread(thread_data))
        return CommentSerializer(comment, context=context).data
    def test_basic(self):
        """Full field-by-field check of a minimal serialized comment."""
        comment = {
            "type": "comment",
            "id": "test_comment",
            "thread_id": "test_thread",
            "user_id": str(self.author.id),
            "username": self.author.username,
            "anonymous": False,
            "anonymous_to_peers": False,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "body": "Test body",
            "endorsed": False,
            "abuse_flaggers": [],
            "votes": {"up_count": 4},
            "children": [],
            "child_count": 0,
        }
        expected = {
            "id": "test_comment",
            "thread_id": "test_thread",
            "parent_id": None,
            "author": self.author.username,
            "author_label": None,
            "created_at": "2015-04-28T00:00:00Z",
            "updated_at": "2015-04-28T11:11:11Z",
            "raw_body": "Test body",
            "rendered_body": "<p>Test body</p>",
            "endorsed": False,
            "endorsed_by": None,
            "endorsed_by_label": None,
            "endorsed_at": None,
            "abuse_flagged": False,
            "voted": False,
            "vote_count": 4,
            "children": [],
            "editable_fields": ["abuse_flagged", "voted"],
            "child_count": 0,
        }
        self.assertEqual(self.serialize(comment), expected)
    @ddt.data(
        *itertools.product(
            [
                FORUM_ROLE_ADMINISTRATOR,
                FORUM_ROLE_MODERATOR,
                FORUM_ROLE_COMMUNITY_TA,
                FORUM_ROLE_STUDENT,
            ],
            [True, False]
        )
    )
    @ddt.unpack
    def test_endorsed_by(self, endorser_role_name, thread_anonymous):
        """
        Test correctness of the endorsed_by field.
        The endorser should be anonymous iff the thread is anonymous to the
        requester, and the endorser is not a privileged user.
        endorser_role_name is the name of the endorser's role.
        thread_anonymous is the value of the anonymous field in the thread.
        """
        self.create_role(endorser_role_name, [self.endorser])
        serialized = self.serialize(
            self.make_cs_content(with_endorsement=True),
            thread_data={"anonymous": thread_anonymous}
        )
        actual_endorser_anonymous = serialized["endorsed_by"] is None
        expected_endorser_anonymous = endorser_role_name == FORUM_ROLE_STUDENT and thread_anonymous
        self.assertEqual(actual_endorser_anonymous, expected_endorser_anonymous)
    @ddt.data(
        (FORUM_ROLE_ADMINISTRATOR, "Staff"),
        (FORUM_ROLE_MODERATOR, "Staff"),
        (FORUM_ROLE_COMMUNITY_TA, "Community TA"),
        (FORUM_ROLE_STUDENT, None),
    )
    @ddt.unpack
    def test_endorsed_by_labels(self, role_name, expected_label):
        """
        Test correctness of the endorsed_by_label field.
        The label should be "Staff", "Staff", or "Community TA" for the
        Administrator, Moderator, and Community TA roles, respectively.
        role_name is the name of the author's role.
        expected_label is the expected value of the author_label field in the
        API output.
        """
        self.create_role(role_name, [self.endorser])
        serialized = self.serialize(self.make_cs_content(with_endorsement=True))
        self.assertEqual(serialized["endorsed_by_label"], expected_label)
    def test_endorsed_at(self):
        """The endorsement timestamp passes through unchanged."""
        serialized = self.serialize(self.make_cs_content(with_endorsement=True))
        self.assertEqual(serialized["endorsed_at"], self.endorsed_at)
    def test_children(self):
        """Nested children serialize recursively with correct parent_ids."""
        comment = self.make_cs_content({
            "id": "test_root",
            "children": [
                self.make_cs_content({
                    "id": "test_child_1",
                    "parent_id": "test_root",
                }),
                self.make_cs_content({
                    "id": "test_child_2",
                    "parent_id": "test_root",
                    "children": [
                        self.make_cs_content({
                            "id": "test_grandchild",
                            "parent_id": "test_child_2"
                        })
                    ],
                }),
            ],
        })
        serialized = self.serialize(comment)
        self.assertEqual(serialized["children"][0]["id"], "test_child_1")
        self.assertEqual(serialized["children"][0]["parent_id"], "test_root")
        self.assertEqual(serialized["children"][1]["id"], "test_child_2")
        self.assertEqual(serialized["children"][1]["parent_id"], "test_root")
        self.assertEqual(serialized["children"][1]["children"][0]["id"], "test_grandchild")
        self.assertEqual(serialized["children"][1]["children"][0]["parent_id"], "test_child_2")
@ddt.ddt
class ThreadSerializerDeserializationTest(CommentsServiceMockMixin, UrlResetMixin, SharedModuleStoreTestCase):
    """Tests for ThreadSerializer deserialization."""
    @classmethod
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUpClass(cls):
        super(ThreadSerializerDeserializationTest, cls).setUpClass()
        cls.course = CourseFactory.create()
    @mock.patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        """Stub the comments service and prepare minimal create/update data."""
        super(ThreadSerializerDeserializationTest, self).setUp()
        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)
        self.user = UserFactory.create()
        self.register_get_user_response(self.user)
        self.request = RequestFactory().get("/dummy")
        self.request.user = self.user
        self.minimal_data = {
            "course_id": unicode(self.course.id),
            "topic_id": "test_topic",
            "type": "discussion",
            "title": "Test Title",
            "raw_body": "Test body",
        }
        self.existing_thread = Thread(**make_minimal_cs_thread({
            "id": "existing_thread",
            "course_id": unicode(self.course.id),
            "commentable_id": "original_topic",
            "thread_type": "discussion",
            "title": "Original Title",
            "body": "Original body",
            "user_id": str(self.user.id),
            "username": self.user.username,
            "read": "False",
            "endorsed": "False"
        }))
    def save_and_reserialize(self, data, instance=None):
        """
        Create a serializer with the given data and (if updating) instance,
        ensure that it is valid, save the result, and return the full thread
        data from the serializer.
        """
        serializer = ThreadSerializer(
            instance,
            data=data,
            partial=(instance is not None),
            context=get_context(self.course, self.request)
        )
        self.assertTrue(serializer.is_valid())
        serializer.save()
        return serializer.data
    def test_create_minimal(self):
        """Creating with minimal data POSTs the expected form to the service."""
        self.register_post_thread_response({"id": "test_id", "username": self.user.username})
        saved = self.save_and_reserialize(self.minimal_data)
        self.assertEqual(
            urlparse(httpretty.last_request().path).path,
            "/api/v1/test_topic/threads"
        )
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["test_topic"],
                "thread_type": ["discussion"],
                "title": ["Test Title"],
                "body": ["Test body"],
                "user_id": [str(self.user.id)],
            }
        )
        self.assertEqual(saved["id"], "test_id")
    def test_create_all_fields(self):
        """Optional fields (group_id) are forwarded to the service."""
        self.register_post_thread_response({"id": "test_id", "username": self.user.username})
        data = self.minimal_data.copy()
        data["group_id"] = 42
        self.save_and_reserialize(data)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["test_topic"],
                "thread_type": ["discussion"],
                "title": ["Test Title"],
                "body": ["Test body"],
                "user_id": [str(self.user.id)],
                "group_id": ["42"],
            }
        )
    def test_create_missing_field(self):
        """Each required field missing in turn yields a validation error."""
        for field in self.minimal_data:
            data = self.minimal_data.copy()
            data.pop(field)
            serializer = ThreadSerializer(data=data)
            self.assertFalse(serializer.is_valid())
            self.assertEqual(
                serializer.errors,
                {field: ["This field is required."]}
            )
    @ddt.data("", " ")
    def test_create_empty_string(self, value):
        """Blank or whitespace-only required fields are rejected."""
        data = self.minimal_data.copy()
        data.update({field: value for field in ["topic_id", "title", "raw_body"]})
        serializer = ThreadSerializer(data=data, context=get_context(self.course, self.request))
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors,
            {field: ["This field may not be blank."] for field in ["topic_id", "title", "raw_body"]}
        )
    def test_create_type(self):
        """Only 'discussion' and 'question' are accepted thread types."""
        self.register_post_thread_response({"id": "test_id", "username": self.user.username})
        data = self.minimal_data.copy()
        data["type"] = "question"
        self.save_and_reserialize(data)
        data["type"] = "invalid_type"
        serializer = ThreadSerializer(data=data)
        self.assertFalse(serializer.is_valid())
    def test_update_empty(self):
        """A partial update with no data re-PUTs the existing field values."""
        self.register_put_thread_response(self.existing_thread.attributes)
        self.save_and_reserialize({}, self.existing_thread)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["original_topic"],
                "thread_type": ["discussion"],
                "title": ["Original Title"],
                "body": ["Original body"],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "closed": ["False"],
                "pinned": ["False"],
                "user_id": [str(self.user.id)],
                "read": ["False"],
            }
        )
    @ddt.data(True, False)
    def test_update_all(self, read):
        """Updating every editable field PUTs the new values and echoes them."""
        self.register_put_thread_response(self.existing_thread.attributes)
        data = {
            "topic_id": "edited_topic",
            "type": "question",
            "title": "Edited Title",
            "raw_body": "Edited body",
            "read": read,
        }
        saved = self.save_and_reserialize(data, self.existing_thread)
        self.assertEqual(
            httpretty.last_request().parsed_body,
            {
                "course_id": [unicode(self.course.id)],
                "commentable_id": ["edited_topic"],
                "thread_type": ["question"],
                "title": ["Edited Title"],
                "body": ["Edited body"],
                "anonymous": ["False"],
                "anonymous_to_peers": ["False"],
                "closed": ["False"],
                "pinned": ["False"],
                "user_id": [str(self.user.id)],
                "read": [str(read)],
            }
        )
        for key in data:
            self.assertEqual(saved[key], data[key])
    @ddt.data("", " ")
    def test_update_empty_string(self, value):
        """Blank values are rejected on update just as on create."""
        serializer = ThreadSerializer(
            self.existing_thread,
            data={field: value for field in ["topic_id", "title", "raw_body"]},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors,
            {field: ["This field may not be blank."] for field in ["topic_id", "title", "raw_body"]}
        )
    def test_update_course_id(self):
        """course_id is immutable once a thread exists."""
        serializer = ThreadSerializer(
            self.existing_thread,
            data={"course_id": "some/other/course"},
            partial=True,
            context=get_context(self.course, self.request)
        )
        self.assertFalse(serializer.is_valid())
        self.assertEqual(
            serializer.errors,
            {"course_id": ["This field is not allowed in an update."]}
        )
@ddt.ddt
class CommentSerializerDeserializationTest(CommentsServiceMockMixin, SharedModuleStoreTestCase):
"""Tests for ThreadSerializer deserialization."""
@classmethod
def setUpClass(cls):
super(CommentSerializerDeserializationTest, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(CommentSerializerDeserializationTest, self).setUp()
httpretty.reset()
httpretty.enable()
self.addCleanup(httpretty.disable)
self.user = UserFactory.create()
self.register_get_user_response(self.user)
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
self.minimal_data = {
"thread_id": "test_thread",
"raw_body": "Test body",
}
self.existing_comment = Comment(**make_minimal_cs_comment({
"id": "existing_comment",
"thread_id": "existing_thread",
"body": "Original body",
"user_id": str(self.user.id),
"username": self.user.username,
"course_id": unicode(self.course.id),
}))
def save_and_reserialize(self, data, instance=None):
"""
Create a serializer with the given data, ensure that it is valid, save
the result, and return the full comment data from the serializer.
"""
context = get_context(
self.course,
self.request,
make_minimal_cs_thread({"course_id": unicode(self.course.id)})
)
serializer = CommentSerializer(
instance,
data=data,
partial=(instance is not None),
context=context
)
self.assertTrue(serializer.is_valid())
serializer.save()
return serializer.data
@ddt.data(None, "test_parent")
def test_create_success(self, parent_id):
data = self.minimal_data.copy()
if parent_id:
data["parent_id"] = parent_id
self.register_get_comment_response({"thread_id": "test_thread", "id": parent_id})
self.register_post_comment_response(
{"id": "test_comment", "username": self.user.username},
thread_id="test_thread",
parent_id=parent_id
)
saved = self.save_and_reserialize(data)
expected_url = (
"/api/v1/comments/{}".format(parent_id) if parent_id else
"/api/v1/threads/test_thread/comments"
)
self.assertEqual(urlparse(httpretty.last_request().path).path, expected_url)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)],
}
)
self.assertEqual(saved["id"], "test_comment")
self.assertEqual(saved["parent_id"], parent_id)
def test_create_all_fields(self):
data = self.minimal_data.copy()
data["parent_id"] = "test_parent"
data["endorsed"] = True
self.register_get_comment_response({"thread_id": "test_thread", "id": "test_parent"})
self.register_post_comment_response(
{"id": "test_comment", "username": self.user.username},
thread_id="test_thread",
parent_id="test_parent"
)
self.save_and_reserialize(data)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)],
"endorsed": ["True"],
}
)
def test_create_parent_id_nonexistent(self):
self.register_get_comment_error_response("bad_parent", 404)
data = self.minimal_data.copy()
data["parent_id"] = "bad_parent"
context = get_context(self.course, self.request, make_minimal_cs_thread())
serializer = CommentSerializer(data=data, context=context)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{
"non_field_errors": [
"parent_id does not identify a comment in the thread identified by thread_id."
]
}
)
def test_create_parent_id_wrong_thread(self):
self.register_get_comment_response({"thread_id": "different_thread", "id": "test_parent"})
data = self.minimal_data.copy()
data["parent_id"] = "test_parent"
context = get_context(self.course, self.request, make_minimal_cs_thread())
serializer = CommentSerializer(data=data, context=context)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{
"non_field_errors": [
"parent_id does not identify a comment in the thread identified by thread_id."
]
}
)
    @ddt.data(None, -1, 0, 2, 5)
    def test_create_parent_id_too_deep(self, max_depth):
        """
        For each MAX_COMMENT_DEPTH setting, a parent just inside the limit
        validates and one at/over the limit is rejected.

        max_depth of None means unlimited; a negative max_depth forbids even
        top-level replies (parent_id None).
        """
        with mock.patch("django_comment_client.utils.MAX_COMMENT_DEPTH", max_depth):
            data = self.minimal_data.copy()
            context = get_context(self.course, self.request, make_minimal_cs_thread())
            # Positive case: a parent that is still allowed at this depth limit.
            if max_depth is None or max_depth >= 0:
                if max_depth != 0:
                    # Parent one level below the limit (arbitrary depth 100 when unlimited).
                    self.register_get_comment_response({
                        "id": "not_too_deep",
                        "thread_id": "test_thread",
                        "depth": max_depth - 1 if max_depth else 100
                    })
                    data["parent_id"] = "not_too_deep"
                else:
                    # With a limit of 0 only top-level comments are allowed.
                    data["parent_id"] = None
                serializer = CommentSerializer(data=data, context=context)
                self.assertTrue(serializer.is_valid(), serializer.errors)
            # Negative case: a parent at the limit (or any comment when limit < 0).
            if max_depth is not None:
                if max_depth >= 0:
                    self.register_get_comment_response({
                        "id": "too_deep",
                        "thread_id": "test_thread",
                        "depth": max_depth
                    })
                    data["parent_id"] = "too_deep"
                else:
                    data["parent_id"] = None
                serializer = CommentSerializer(data=data, context=context)
                self.assertFalse(serializer.is_valid())
                self.assertEqual(serializer.errors, {"non_field_errors": ["Comment level is too deep."]})
def test_create_missing_field(self):
for field in self.minimal_data:
data = self.minimal_data.copy()
data.pop(field)
serializer = CommentSerializer(
data=data,
context=get_context(self.course, self.request, make_minimal_cs_thread())
)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{field: ["This field is required."]}
)
def test_create_endorsed(self):
# TODO: The comments service doesn't populate the endorsement field on
# comment creation, so this is sadly realistic
self.register_post_comment_response({"username": self.user.username}, thread_id="test_thread")
data = self.minimal_data.copy()
data["endorsed"] = True
saved = self.save_and_reserialize(data)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"course_id": [unicode(self.course.id)],
"body": ["Test body"],
"user_id": [str(self.user.id)],
"endorsed": ["True"],
}
)
self.assertTrue(saved["endorsed"])
self.assertIsNone(saved["endorsed_by"])
self.assertIsNone(saved["endorsed_by_label"])
self.assertIsNone(saved["endorsed_at"])
def test_update_empty(self):
self.register_put_comment_response(self.existing_comment.attributes)
self.save_and_reserialize({}, instance=self.existing_comment)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Original body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["False"],
}
)
def test_update_all(self):
cs_response_data = self.existing_comment.attributes.copy()
cs_response_data["endorsement"] = {
"user_id": str(self.user.id),
"time": "2015-06-05T00:00:00Z",
}
self.register_put_comment_response(cs_response_data)
data = {"raw_body": "Edited body", "endorsed": True}
saved = self.save_and_reserialize(data, instance=self.existing_comment)
self.assertEqual(
httpretty.last_request().parsed_body,
{
"body": ["Edited body"],
"course_id": [unicode(self.course.id)],
"user_id": [str(self.user.id)],
"anonymous": ["False"],
"anonymous_to_peers": ["False"],
"endorsed": ["True"],
"endorsement_user_id": [str(self.user.id)],
}
)
for key in data:
self.assertEqual(saved[key], data[key])
self.assertEqual(saved["endorsed_by"], self.user.username)
self.assertEqual(saved["endorsed_at"], "2015-06-05T00:00:00Z")
@ddt.data("", " ")
def test_update_empty_raw_body(self, value):
serializer = CommentSerializer(
self.existing_comment,
data={"raw_body": value},
partial=True,
context=get_context(self.course, self.request)
)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{"raw_body": ["This field may not be blank."]}
)
@ddt.data("thread_id", "parent_id")
def test_update_non_updatable(self, field):
serializer = CommentSerializer(
self.existing_comment,
data={field: "different_value"},
partial=True,
context=get_context(self.course, self.request)
)
self.assertFalse(serializer.is_valid())
self.assertEqual(
serializer.errors,
{field: ["This field is not allowed in an update."]}
)
| longmen21/edx-platform | lms/djangoapps/discussion_api/tests/test_serializers.py | Python | agpl-3.0 | 34,969 |
# Copyright (C) 2013 Oskar Maier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# author Oskar Maier
# version r0.5.1
# since 2011-11-25
# status Release
# build-in modules
# third-party modules
import scipy
import itk
# path changes
# own modules
from ...core import ImageLoadingError, DependencyError, ImageTypeError, Logger
# code
def getInformation(image): # tested
    """
    Returns an information string about a ITK image in a compressed way.

    Parameters
    ----------
    image : itk.Image
        An ITK image as used by WrapITK.

    Returns
    -------
    information : string
        Pretty-formatted string with image metadata.

    Notes
    -----
    Performs UpdateOutputInformation() on the image, therefore triggering
    pipeline processing if necessary. Only works on 3D images.
    """
    # make sure the meta-data is up to date before querying it
    image.UpdateOutputInformation()
    region_size = image.GetLargestPossibleRegion().GetSize()
    dimensions = region_size.GetSizeDimension()
    spacing = image.GetSpacing()
    origin = image.GetOrigin()
    lines = ['itkImageData info:']
    lines.append('\tscalar-type: {}'.format(str(itk.template(image)[1][0])))
    lines.append('\tdimensions: {}'.format([region_size.GetElement(i) for i in range(dimensions)]))
    lines.append('\tspacing: {}'.format([spacing.GetElement(i) for i in range(dimensions)]))
    lines.append('\torigin: {}'.format([origin.GetElement(i) for i in range(dimensions)]))
    # read the data dimension from the template args; alternative impl. for
    # when GetImageDimension() fails
    lines.append('\tdata dim.: {}'.format(str(itk.template(image)[1][1])))
    return '\n'.join(lines)
def getInformationWithScalarRange(image): # tested
    """
    Behaves like `getInformation` but also computes the intensity range,
    which is computationally expensive.

    Parameters
    ----------
    image : itk.Image
        An ITK image as used by WrapITK.

    Returns
    -------
    information : string
        Pretty-formatted string with image metadata and intensity range.

    Notes
    -----
    Performs UpdateOutputInformation() on the image, therefore triggering
    pipeline processing if necessary.
    """
    info = getInformation(image)
    # the intensity extrema require the actual pixel data, not just the header
    image.Update()
    extremum_calculator = itk.MinimumMaximumImageCalculator[getImageType(image)].New()
    extremum_calculator.SetImage(image)
    extremum_calculator.Compute()
    info += '\n\tscalar-range: ({}, {})\n'.format(extremum_calculator.GetMinimum(),
                                                  extremum_calculator.GetMaximum())
    return info
def saveImageMetaIO(image, file_name): # tested
    """
    Saves the image data into a file as MetaIO format.

    Parameters
    ----------
    image : itk.Image
        An ITK image as used by WrapITK.
    file_name : string
        Path and file name (without suffix) where to save the image.

    Notes
    -----
    A write operation will trigger the image pipeline to be processed.
    """
    # delegate to the generic writer; the .mhd suffix selects MetaIO
    saveImage(image, '{}.mhd'.format(file_name))
def saveImage(image, file_name): # tested
    """
    Saves the image data into a file in the format specified by the file
    name suffix.

    Parameters
    ----------
    image : itk.Image
        An ITK image as used by WrapITK.
    file_name : string
        Path and file name (with suffix) where to save the image.

    Notes
    -----
    A write operation will trigger the image pipeline to be processed.
    """
    # the writer template must match the image's pixel type and dimension
    writer = itk.ImageFileWriter[getImageType(image)].New()
    writer.SetFileName(file_name)
    writer.SetInput(image)
    writer.Write()
def getImageFromArray(arr, image_type = False):
    """
    Returns an itk Image created from the supplied numpy ndarray.

    If the ``image_type`` is supported, will be automatically transformed to
    that type, otherwise the most suitable is selected.

    Parameters
    ----------
    arr : array_like
        The image as numpy array.
    image_type : itk.Image (template)
        An itk image type.

    Returns
    -------
    itk_image : itk.Image (instance)
        An instance of itk.Image holding the array's data.

    Notes
    -----
    Always use this instead of directly the itk.PyBuffer, as that object
    transposes the image axes.
    """
    # itk.PyBuffer transposes the image dimensions; counter this up front
    transposed = scipy.transpose(arr)
    # pick the most suitable itk type when none was requested
    if not image_type:
        image_type = getImageTypeFromArray(transposed)
    return itk.PyBuffer[image_type].GetImageFromArray(transposed)
def getArrayFromImage(image):
    """
    Returns a numpy ndarray created from the supplied itk image.

    Parameters
    ----------
    image : itk.Image (instance)
        An instance of itk.Image holding the array's data.

    Returns
    -------
    arr : ndarray
        The image as numpy array.

    Notes
    -----
    Always use this instead of directly the itk.PyBuffer, as that object
    transposes the image axes.
    """
    converter = itk.PyBuffer[getImageType(image)]
    ############
    # !BUG: WarpITK is a very critical itk wrapper. Everything returned /
    # created by it seems to exist only inside the scope of the current
    # function. The returned array therefore has to be copied once to
    # survive outside the current scope.
    ############
    arr = converter.GetArrayFromImage(image).copy()
    # itk.PyBuffer transposes the image dimensions; counter this on the way out
    return scipy.squeeze(scipy.transpose(arr))
def getImageType(image): # tested
    """
    Returns the image type of the supplied image as itk.Image template.

    Parameters
    ----------
    image : itk.Image (instance)
        An instance of itk.Image.

    Returns
    -------
    image_type : itk.Image (template)
        An itk image type.

    Raises
    ------
    NotImplementedError
        If the itk python wrappers define no template class for the image's
        pixel data type.
    """
    try:
        # template args are (pixel type, dimension)
        return itk.Image[itk.template(image)[1][0],
                         itk.template(image)[1][1]]
    except IndexError:
        # Use the call form of raise: the original comma form
        # (``raise NotImplementedError, '...'``) is Python-2-only syntax
        # and a SyntaxError under Python 3; the call form behaves
        # identically on both.
        raise NotImplementedError('The python wrappers of ITK define no template class for this data type.')
def getImageTypeFromArray(arr): # tested
    """
    Returns the image type of the supplied array as itk.Image template.

    Tries candidate itk pixel types for the array's dtype in order of
    suitability and returns the first template the installed itk wrappers
    actually provide for the array's dimensionality.

    Parameters
    ----------
    arr : array_like
        The image as numpy array.

    Returns
    -------
    image_type : itk.Image (template)
        An itk image type.

    Raises
    ------
    DependencyError
        If the itk wrapper do not support the target image type
    ImageTypeError
        If the array dtype is unsupported
    """
    # mapping from scipy to the possible itk types, in order from most to least suitable
    # ! this assumes char=8bit, short=16bit and long=32bit (minimal values)
    # NOTE(review): the bool_ candidate list begins with itk.SS and lists
    # itk.SS again later; the first entry looks like it was meant to be a
    # distinct (e.g. 8-bit) type - confirm before changing.
    scipy_to_itk_types = {scipy.bool_: [itk.SS, itk.UC, itk.US, itk.SS, itk.UL, itk.SL],
                          scipy.uint8: [itk.UC, itk.US, itk.SS, itk.UL, itk.SL],
                          scipy.uint16: [itk.US, itk.UL, itk.SL],
                          scipy.uint32: [itk.UL],
                          scipy.uint64: [],
                          scipy.int8: [itk.SC, itk.SS, itk.SL],
                          scipy.int16: [itk.SS, itk.SL],
                          scipy.int32: [itk.SL],
                          scipy.int64: [],
                          scipy.float32: [itk.F, itk.D],
                          scipy.float64: [itk.D],
                          scipy.float128: []}
    if arr.ndim <= 1:
        raise DependencyError('Itk does not support images with less than 2 dimensions.')
    # check for unknown array data type
    if not arr.dtype.type in scipy_to_itk_types:
        raise ImageTypeError('The array dtype {} could not be mapped to any itk image type.'.format(arr.dtype))
    # check if any valid conversion exists
    if 0 == len(scipy_to_itk_types[arr.dtype.type]):
        raise ImageTypeError('No valid itk type for the pixel data dtype {}.'.format(arr.dtype))
    # iterate and try out candidate templates
    ex = None
    for itk_type in scipy_to_itk_types[arr.dtype.type]:
        try:
            return itk.Image[itk_type, arr.ndim]
        except Exception as e: # pass raised exception, as a list of ordered possible itk pixel types is processed and some of them might not be compiled with the current itk wrapper module
            ex = e
            pass
    # if none fitted, examine error and eventually translate, otherwise rethrow
    if type(ex) == KeyError:
        raise DependencyError('The itk python wrappers were compiled without support the combination of {} dimensions and at least one of the following pixel data types (which are compatible with dtype {}): {}.'.format(arr.ndim, arr.dtype, scipy_to_itk_types[arr.dtype.type]))
    else:
        # NOTE(review): a bare ``raise`` outside an except block re-raises the
        # last handled exception under Python 2 only; under Python 3 this
        # would need ``raise ex`` - confirm target interpreter.
        raise
def getImageTypeFromFile(image): # tested
    """
    Inconvenient but necessary implementation to load image with ITK whose
    component type and number of dimensions are unknown.

    Iterates over all known ITK image loaders, asks each whether it can read
    the file and, for the first that can, reads the header to determine the
    voxel component type and the number of dimensions.

    Parameters
    ----------
    image : string
        Path to an image.

    Returns
    -------
    image_type : itk.Image (template) or False
        Either the correct image type (itk.Image template) associated with
        the image file or False if no loader recognized the file.

    Raises
    ------
    ImageLoadingError
        If the header of the supplied image could be recognized but is of an
        unsupported type (non-scalar pixel type or more than one component).
    """
    logger = Logger.getInstance()
    # list of component type strings (as returned by ImageIOBase.GetComponentTypeAsString())
    # mapped to itk component types
    string_to_component = {'char': itk.SC,
                           'unsigned_char': itk.UC,
                           'short': itk.SS,
                           'unsigned_short': itk.US,
                           'int': itk.SI,
                           'unsigned_int': itk.UI,
                           'long': itk.SL,
                           'unsigned_long': itk.UL,
                           'float': itk.F,
                           'double': itk.D}
    # List of all current itk image loaders
    image_loaders = [itk.GE4ImageIO,
                     itk.BMPImageIO,
                     itk.NiftiImageIO,
                     itk.PNGImageIO,
                     itk.BioRadImageIO,
                     itk.LSMImageIO,
                     itk.NrrdImageIO,
                     itk.SiemensVisionImageIO,
                     itk.IPLCommonImageIO,
                     itk.JPEGImageIO,
                     itk.GEAdwImageIO,
                     itk.AnalyzeImageIO,
                     itk.Brains2MaskImageIO,
                     itk.TIFFImageIO,
                     itk.VTKImageIO,
                     itk.GDCMImageIO,
                     itk.GE5ImageIO,
                     itk.GiplImageIO,
                     itk.MetaImageIO,
                     itk.StimulateImageIO,
                     itk.DICOMImageIO2]
    # try to find an image loader who feels responsible for the image
    # Note: All used methods are based on the common parent class ImageIOBase
    for loader_class in image_loaders:
        loader = loader_class.New()
        if loader.CanReadFile(image):
            # load image header
            loader.SetFileName(image)
            loader.ReadImageInformation()
            # request information about the image
            pixel_type = loader.GetPixelTypeAsString(loader.GetPixelType())
            component_type = loader.GetComponentTypeAsString(loader.GetComponentType())
            dimensions = loader.GetNumberOfDimensions()
            components = loader.GetNumberOfComponents()
            # Only scalar images with exactly one component are supported.
            # BUGFIX: the original condition used ``and`` between the two
            # negated tests, which only rejected images failing BOTH checks,
            # letting non-scalar single-component images (and scalar
            # multi-component ones) slip through.
            if not ('scalar' == pixel_type and 1 == components):
                message = 'Can only open scalar image with one component. Found type {} with {} components.'.format(pixel_type, components)
                logger.error(message)
                raise ImageLoadingError(message)
            # return image type object
            logger.debug('Determined image type as {} with pixel type {} and {} dimensions.'.format(loader, component_type, dimensions))
            return itk.Image[string_to_component[component_type], dimensions]
    # no suitable loader found
    return False
| lfrdm/medpy | medpy/itkvtk/utilities/itku.py | Python | gpl-3.0 | 13,196 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - group access via various backends.
The composite_groups is a backend that does not have direct storage,
but composes other backends to a new one, so group definitions are
retrieved from several backends. This allows to mix different
backends.
@copyright: 2009 DmitrijsMilajevs
@license: GPL, see COPYING for details
"""
from MoinMoin.datastruct.backends import BaseGroupsBackend, GroupDoesNotExistError
class CompositeGroups(BaseGroupsBackend):
    """
    Manage several group backends.
    """

    def __init__(self, request, *backends):
        """
        @param backends: list of group backends which are used to get
                         access to the group definitions.
        """
        super(CompositeGroups, self).__init__(request)
        self._backends = backends

    def __getitem__(self, group_name):
        """
        Return the group named group_name from the first backend defining
        it; raise GroupDoesNotExistError if no backend does.
        """
        for backend in self._backends:
            try:
                return backend[group_name]
            except GroupDoesNotExistError:
                continue
        raise GroupDoesNotExistError(group_name)

    def __iter__(self):
        """
        Yield each distinct group name once, in backend order.

        When several backends define the same group name, only the backend
        listed earliest in self._backends contributes it.
        """
        seen = set()
        for backend in self._backends:
            for name in backend:
                if name in seen:
                    continue
                seen.add(name)
                yield name

    def __contains__(self, group_name):
        """
        Check if a group called group_name is available in any of the backends.

        @param group_name: name of the group [unicode]
        """
        return any(group_name in backend for backend in self._backends)

    def __repr__(self):
        return "<%s backends=%s>" % (self.__class__, self._backends)
| Glottotopia/aagd | moin/local/moin/build/lib.linux-x86_64-2.6/MoinMoin/datastruct/backends/composite_groups.py | Python | mit | 2,200 |
# Generated test script: exercises the generic classification test harness for
# an AdaBoostClassifier model on the BreastCancer dataset, targeting DB2 SQL.
from sklearn2sql_heroku.tests.classification import generic as class_gen
class_gen.test_model("AdaBoostClassifier" , "BreastCancer" , "db2")
| antoinecarme/sklearn2sql_heroku | tests/classification/BreastCancer/ws_BreastCancer_AdaBoostClassifier_db2_code_gen.py | Python | bsd-3-clause | 143 |
# -*- coding: utf-8 -*-
"""Objects representing MediaWiki families."""
#
# (C) Pywikibot team, 2004-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import sys
import logging
import re
import collections
import imp
import string
import warnings
if sys.version_info[0] > 2:
import urllib.parse as urlparse
else:
import urlparse
from warnings import warn
import pywikibot
from pywikibot import config2 as config
from pywikibot.tools import (
deprecated, deprecated_args, issue_deprecation_warning,
FrozenDict,
)
from pywikibot.exceptions import UnknownFamily, FamilyMaintenanceWarning
logger = logging.getLogger("pywiki.wiki.family")
# Legal characters for Family.name and Family.langs keys
NAME_CHARACTERS = string.ascii_letters + string.digits
CODE_CHARACTERS = string.ascii_lowercase + string.digits + '-'
class Family(object):
"""Parent class for all wiki families."""
def __init__(self):
"""Constructor."""
if not hasattr(self, 'name'):
self.name = None
if not hasattr(self, 'langs'):
self.langs = {}
# For interwiki sorting order see
# https://meta.wikimedia.org/wiki/Interwiki_sorting_order
# The sorting order by language name from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename
self.alphabetic = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bm',
'bn', 'bjn', 'zh-min-nan', 'nan', 'map-bms', 'ba', 'be', 'be-x-old',
'bh', 'bcl', 'bi', 'bg', 'bar', 'bo', 'bs', 'br', 'bxr', 'ca', 'cv',
'ceb', 'cs', 'ch', 'cbk-zam', 'ny', 'sn', 'tum', 'cho', 'co', 'cy',
'da', 'dk', 'pdc', 'de', 'dv', 'nv', 'dsb', 'dz', 'mh', 'et', 'el',
'eml', 'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif',
'fo', 'fr', 'fy', 'ff', 'fur', 'ga', 'gv', 'gag', 'gd', 'gl', 'gan',
'ki', 'glk', 'gu', 'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy',
'hi', 'ho', 'hsb', 'hr', 'io', 'ig', 'ilo', 'bpy', 'id', 'ia', 'ie',
'iu', 'ik', 'os', 'xh', 'zu', 'is', 'it', 'he', 'jv', 'kl', 'kn',
'kr', 'pam', 'krc', 'ka', 'ks', 'csb', 'kk', 'kw', 'rw', 'rn', 'sw',
'kv', 'kg', 'ht', 'ku', 'kj', 'ky', 'mrj', 'lad', 'lbe', 'lez',
'lo', 'ltg', 'la', 'lv', 'lb', 'lt', 'lij', 'li', 'ln', 'jbo', 'lg',
'lmo', 'hu', 'mk', 'mg', 'ml', 'mt', 'mi', 'mr', 'xmf', 'arz',
'mzn', 'ms', 'min', 'cdo', 'mwl', 'mdf', 'mo', 'mn', 'mus', 'my',
'nah', 'na', 'fj', 'nl', 'nds-nl', 'cr', 'ne', 'new', 'ja', 'nap',
'ce', 'frr', 'pih', 'no', 'nb', 'nn', 'nrm', 'nov', 'ii', 'oc',
'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa', 'pi', 'pfl', 'pag',
'pnb', 'pap', 'ps', 'koi', 'km', 'pcd', 'pms', 'tpi', 'nds', 'pl',
'tokipona', 'tp', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh',
'ro', 'rmy', 'rm', 'qu', 'rue', 'ru', 'sah', 'se', 'sm', 'sa', 'sg',
'sc', 'sco', 'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple',
'sd', 'ss', 'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh',
'su', 'fi', 'sv', 'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te',
'tet', 'th', 'ti', 'tg', 'to', 'chr', 'chy', 've', 'tr', 'tk', 'tw',
'tyv', 'udm', 'bug', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vi',
'vo', 'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu',
'ts', 'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw',
'zh-cn',
]
# The revised sorting order by first word from meta
# MediaWiki:Interwiki_config-sorting_order-native-languagename-firstword
self.alphabetic_revised = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'as', 'ast', 'gn', 'av', 'ay', 'az', 'bjn',
'id', 'ms', 'bm', 'bn', 'zh-min-nan', 'nan', 'map-bms', 'jv', 'su',
'ba', 'min', 'be', 'be-x-old', 'bh', 'bcl', 'bi', 'bar', 'bo', 'bs',
'br', 'bug', 'bg', 'bxr', 'ca', 'ceb', 'cv', 'cs', 'ch', 'cbk-zam',
'ny', 'sn', 'tum', 'cho', 'co', 'cy', 'da', 'dk', 'pdc', 'de', 'dv',
'nv', 'dsb', 'na', 'dz', 'mh', 'et', 'el', 'eml', 'en', 'myv', 'es',
'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr', 'fy', 'ff', 'fur',
'ga', 'gv', 'sm', 'gag', 'gd', 'gl', 'gan', 'ki', 'glk', 'gu',
'got', 'hak', 'xal', 'ko', 'ha', 'haw', 'hy', 'hi', 'ho', 'hsb',
'hr', 'io', 'ig', 'ilo', 'bpy', 'ia', 'ie', 'iu', 'ik', 'os', 'xh',
'zu', 'is', 'it', 'he', 'kl', 'kn', 'kr', 'pam', 'ka', 'ks', 'csb',
'kk', 'kw', 'rw', 'ky', 'rn', 'mrj', 'sw', 'kv', 'kg', 'ht', 'ku',
'kj', 'lad', 'lbe', 'lez', 'lo', 'la', 'ltg', 'lv', 'to', 'lb',
'lt', 'lij', 'li', 'ln', 'jbo', 'lg', 'lmo', 'hu', 'mk', 'mg', 'ml',
'krc', 'mt', 'mi', 'mr', 'xmf', 'arz', 'mzn', 'cdo', 'mwl', 'koi',
'mdf', 'mo', 'mn', 'mus', 'my', 'nah', 'fj', 'nl', 'nds-nl', 'cr',
'ne', 'new', 'ja', 'nap', 'ce', 'frr', 'pih', 'no', 'nb', 'nn',
'nrm', 'nov', 'ii', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa',
'pi', 'pfl', 'pag', 'pnb', 'pap', 'ps', 'km', 'pcd', 'pms', 'nds',
'pl', 'pnt', 'pt', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy',
'rm', 'qu', 'ru', 'rue', 'sah', 'se', 'sa', 'sg', 'sc', 'sco',
'stq', 'st', 'nso', 'tn', 'sq', 'scn', 'si', 'simple', 'sd', 'ss',
'sk', 'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'fi', 'sv',
'tl', 'ta', 'shi', 'kab', 'roa-tara', 'tt', 'te', 'tet', 'th', 'vi',
'ti', 'tg', 'tpi', 'tokipona', 'tp', 'chr', 'chy', 've', 'tr', 'tk',
'tw', 'tyv', 'udm', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vo',
'fiu-vro', 'wa', 'zh-classical', 'vls', 'war', 'wo', 'wuu', 'ts',
'yi', 'yo', 'zh-yue', 'diq', 'zea', 'bat-smg', 'zh', 'zh-tw',
'zh-cn',
]
# Order for fy: alphabetical by code, but y counts as i
self.fyinterwiki = self.alphabetic[:]
self.fyinterwiki.remove('nb')
self.fyinterwiki.sort(key=lambda x:
x.replace("y", "i") + x.count("y") * "!")
# letters that can follow a wikilink and are regarded as part of
# this link
# This depends on the linktrail setting in LanguageXx.php and on
# [[MediaWiki:Linktrail]].
# Note: this is a regular expression.
self.linktrails = {
'_default': u'[a-z]*',
'ab': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'als': u'[äöüßa-z]*',
'an': u'[a-záéíóúñ]*',
'ar': u'[a-zء-ي]*',
'arz': u'[a-zء-ي]*',
'av': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'ay': u'[a-záéíóúñ]*',
'bar': u'[äöüßa-z]*',
'be': u'[абвгґджзеёжзійклмнопрстуўфхцчшыьэюяćčłńśšŭźža-z]*',
'be-x-old': u'[абвгґджзеёжзійклмнопрстуўфхцчшыьэюяćčłńśšŭźža-z]*',
'bg': u'[a-zабвгдежзийклмнопрстуфхцчшщъыьэюя]*',
'bm': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'bs': u'[a-zćčžšđž]*',
'bxr': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'ca': u'[a-zàèéíòóúç·ïü]*',
'cbk-zam': u'[a-záéíóúñ]*',
'ce': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'ckb': u'[ئابپتجچحخدرڕزژسشعغفڤقکگلڵمنوۆهھەیێ]*',
'co': u'[a-zàéèíîìóòúù]*',
'crh': u'[a-zâçğıñöşüа-яё“»]*',
'cs': u'[a-záčďéěíňóřšťúůýž]*',
'csb': u'[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*',
'cu': u'[a-zабвгдеєжѕзїіıићклмнопсстѹфхѡѿцчшщъыьѣюѥѧѩѫѭѯѱѳѷѵґѓђёјйљњќуўџэ҄я“»]*',
'cv': u'[a-zа-яĕçăӳ"»]*',
'cy': u'[àáâèéêìíîïòóôûŵŷa-z]*',
'da': u'[a-zæøå]*',
'de': u'[a-zäöüß]*',
'dsb': u'[äöüßa-z]*',
'el': u'[a-zαβγδεζηθικλμνξοπρστυφχψωςΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίόύώϊϋΐΰΆΈΉΊΌΎΏΪΫ]*',
'eml': u'[a-zàéèíîìóòúù]*',
'es': u'[a-záéíóúñ]*',
'et': u'[äöõšüža-z]*',
'fa': u'[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة]*',
'ff': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'fi': u'[a-zäö]*',
'fiu-vro': u'[äöõšüža-z]*',
'fo': u'[áðíóúýæøa-z]*',
'fr': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'frp': u'[a-zàâçéèêîœôû·’æäåāăëēïīòöōùü‘]*',
'frr': u'[a-zäöüßåāđē]*',
'fur': u'[a-zàéèíîìóòúù]*',
'fy': u'[a-zàáèéìíòóùúâêîôûäëïöü]*',
'gag': u'[a-zÇĞçğİıÖöŞşÜüÂâÎîÛû]*',
'gl': u'[áâãàéêẽçíòóôõq̃úüűũa-z]*',
'glk': u'[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة]*',
'gn': u'[a-záéíóúñ]*',
'gu': u'[-૿]*',
'he': u'[a-zא-ת]*',
'hi': u'[a-zऀ-ॣ०-꣠-ꣿ]*',
'hr': u'[čšžćđßa-z]*',
'hsb': u'[äöüßa-z]*',
'ht': u'[a-zàèòÀÈÒ]*',
'hu': u'[a-záéíóúöüőűÁÉÍÓÚÖÜŐŰ]*',
'hy': u'[a-zաբգդեզէըթժիլխծկհձղճմյնշոչպջռսվտրցւփքօֆև«»]*',
'is': u'[áðéíóúýþæöa-z-–]*',
'it': u'[a-zàéèíîìóòúù]*',
'ka': u'[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*',
'kk': u'[a-zäçéğıïñöşüýʺʹа-яёәғіқңөұүһٴابپتجحدرزسشعفقكلمنڭەوۇۋۆىيچھ“»]*',
'kl': u'[a-zæøå]*',
'koi': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'krc': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'ksh': u'[a-zäöüėëijßəğåůæœç]*',
'kv': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'lad': u'[a-záéíóúñ]*',
'lb': u'[äöüßa-z]*',
'lbe': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюяӀ1“»]*',
'lez': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'li': u'[a-zäöüïëéèà]*',
'lij': u'[a-zàéèíîìóòúù]*',
'lmo': u'[a-zàéèíîìóòúù]*',
'ln': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'mg': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'mhr': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'mk': u'[a-zабвгдѓежзѕијклљмнњопрстќуфхцчџш]*',
'ml': u'[a-zം-ൿ]*',
'mn': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя“»]*',
'mr': u'[ऀ-ॣॱ-ॿ]*',
'mrj': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'mwl': u'[áâãàéêẽçíòóôõq̃úüűũa-z]*',
'myv': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'mzn': u'[ابپتثجچحخدذرزژسشصضطظعغفقکگلمنوهیآأئؤة]*',
'nah': u'[a-záéíóúñ]*',
'nap': u'[a-zàéèíîìóòúù]*',
'nds': u'[äöüßa-z]*',
'nds-nl': u'[a-zäöüïëéèà]*',
'nl': u'[a-zäöüïëéèà]*',
'nn': u'[æøåa-z]*',
'no': u'[æøåa-z]*',
'oc': u'[a-zàâçéèêîôû]*',
'or': u'[a-z-]*',
'pa': u'[ਁਂਃਅਆਇਈਉਊਏਐਓਔਕਖਗਘਙਚਛਜਝਞਟਠਡਢਣਤਥਦਧਨਪਫਬਭਮਯਰਲਲ਼ਵਸ਼ਸਹ਼ਾਿੀੁੂੇੈੋੌ੍ਖ਼ਗ਼ਜ਼ੜਫ਼ੰੱੲੳa-z]*',
'pcd': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'pdc': u'[äöüßa-z]*',
'pfl': u'[äöüßa-z]*',
'pl': u'[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*',
'pms': u'[a-zàéèíîìóòúù]*',
'pt': u'[a-záâãàéêẽçíòóôõq̃úüűũ]*',
'qu': u'[a-záéíóúñ]*',
'rmy': u'[a-zăâîşţșțĂÂÎŞŢȘȚ]*',
'ro': u'[a-zăâîşţșțĂÂÎŞŢȘȚ]*',
'ru': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'rue': u'[a-zабвгґдеєжзиіїйклмнопрстуфхцчшщьєюяёъы“»]*',
'sa': u'[a-zऀ-ॣ०-꣠-ꣿ]*',
'sah': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'scn': u'[a-zàéèíîìóòúù]*',
'sg': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'sh': u'[a-zčćđžš]*',
'sk': u'[a-záäčďéíľĺňóôŕšťúýž]*',
'sl': u'[a-zčćđžš]*',
'sr': u'[abvgdđežzijklljmnnjoprstćufhcčdžšабвгдђежзијклљмнњопрстћуфхцчџш]*',
'srn': u'[a-zäöüïëéèà]*',
'stq': u'[äöüßa-z]*',
'sv': u'[a-zåäöéÅÄÖÉ]*',
'szl': u'[a-zęóąśłżźćńĘÓĄŚŁŻŹĆŃ]*',
'ta': u'[-]*',
'te': u'[ఁ-౯]*',
'tg': u'[a-zабвгдеёжзийклмнопрстуфхчшъэюяғӣқўҳҷцщыь]*',
'tk': u'[a-zÄäÇçĞğŇňÖöŞşÜüÝýŽž]*',
'tr': u'[a-zÇĞçğİıÖöŞşÜüÂâÎîÛû]*',
'tt': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюяӘәӨөҮүҖҗҢңҺһ]*',
'ty': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'tyv': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'udm': u'[a-zа-яёӝӟӥӧӵ“»]*',
'uk': u'[a-zабвгґдеєжзиіїйклмнопрстуфхцчшщьєюяёъы“»]*',
'uz': u'[a-zʻʼ“»]*',
'vec': u'[a-zàéèíîìóòúù]*',
'vep': u'[äöõšüža-z]*',
'vi': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'vls': u'[a-zäöüïëéèà]*',
'wa': u'[a-zåâêîôûçéè]*',
'wo': u'[a-zàâçéèêîôûäëïöüùÇÉÂÊÎÔÛÄËÏÖÜÀÈÙ]*',
'xal': u'[a-zабвгдеёжзийклмнопрстуфхцчшщъыьэюя]*',
'xmf': u'[a-zაბგდევზთიკლმნოპჟრსტუფქღყშჩცძწჭხჯჰ“»]*',
'yi': u'[a-zא-ת]*',
'zea': u'[a-zäöüïëéèà]*',
}
# A dictionary where keys are family codes that can be used in
# inter-family interwiki links. Do not use it directly but
# get_known_families() instead.
# TODO: replace this with API interwikimap call
self.known_families = {
'abbenormal': 'abbenormal',
'acronym': 'acronym',
'advisory': 'advisory',
'advogato': 'advogato',
'aew': 'aew',
'airwarfare': 'airwarfare',
'aiwiki': 'aiwiki',
'allwiki': 'allwiki',
'appropedia': 'appropedia',
'aquariumwiki': 'aquariumwiki',
'arxiv': 'arxiv',
'aspienetwiki': 'aspienetwiki',
'atmwiki': 'atmwiki',
'b': 'wikibooks',
'battlestarwiki': 'battlestarwiki',
'bemi': 'bemi',
'benefitswiki': 'benefitswiki',
'betawiki': 'betawiki',
'betawikiversity': 'betawikiversity',
'biblewiki': 'biblewiki',
'bluwiki': 'bluwiki',
'botwiki': 'botwiki',
'boxrec': 'boxrec',
'brickwiki': 'brickwiki',
'bridgeswiki': 'bridgeswiki',
'bugzilla': 'bugzilla',
'buzztard': 'buzztard',
'bytesmiths': 'bytesmiths',
'c2': 'c2',
'c2find': 'c2find',
'cache': 'cache',
'canwiki': 'canwiki',
'canyonwiki': 'canyonwiki',
'Ĉej': 'Ĉej',
'cellwiki': 'cellwiki',
'centralwikia': 'centralwikia',
'chapter': 'chapter',
'chej': 'chej',
'choralwiki': 'choralwiki',
'ciscavate': 'ciscavate',
'citizendium': 'citizendium',
'ckwiss': 'ckwiss',
'closed-zh-tw': 'closed-zh-tw',
'cndbname': 'cndbname',
'cndbtitle': 'cndbtitle',
'colab': 'colab',
'comcom': 'comcom',
'comixpedia': 'comixpedia',
'commons': 'commons',
'communityscheme': 'communityscheme',
'comune': 'comune',
'consciousness': 'consciousness',
'corpknowpedia': 'corpknowpedia',
'crazyhacks': 'crazyhacks',
'creatureswiki': 'creatureswiki',
'cxej': 'cxej',
'dawiki': 'dawiki',
'dbdump': 'dbdump',
'dcc': 'dcc',
'dcdatabase': 'dcdatabase',
'dcma': 'dcma',
'dejanews': 'dejanews',
'delicious': 'delicious',
'demokraatia': 'demokraatia',
'devmo': 'devmo',
'dict': 'dict',
'dictionary': 'dictionary',
'disinfopedia': 'disinfopedia',
'distributedproofreaders': 'distributedproofreaders',
'distributedproofreadersca': 'distributedproofreadersca',
'dk': 'dk',
'dmoz': 'dmoz',
'dmozs': 'dmozs',
'docbook': 'docbook',
# 'doi': 'doi',
'doom_wiki': 'doom_wiki',
'download': 'download',
'drae': 'drae',
'dreamhost': 'dreamhost',
'drumcorpswiki': 'drumcorpswiki',
'dwjwiki': 'dwjwiki',
'eĉei': 'eĉei',
'echei': 'echei',
'ecoreality': 'ecoreality',
'ecxei': 'ecxei',
'efnetceewiki': 'efnetceewiki',
'efnetcppwiki': 'efnetcppwiki',
'efnetpythonwiki': 'efnetpythonwiki',
'efnetxmlwiki': 'efnetxmlwiki',
'elibre': 'elibre',
'emacswiki': 'emacswiki',
'energiewiki': 'energiewiki',
'eokulturcentro': 'eokulturcentro',
'epo': 'epo',
'ethnologue': 'ethnologue',
'evowiki': 'evowiki',
'exotica': 'exotica',
'fanimutationwiki': 'fanimutationwiki',
'finalempire': 'finalempire',
'finalfantasy': 'finalfantasy',
'finnix': 'finnix',
'flickruser': 'flickruser',
'floralwiki': 'floralwiki',
'flyerwiki-de': 'flyerwiki-de',
'foldoc': 'foldoc',
'forthfreak': 'forthfreak',
'foundation': 'foundation',
'foxwiki': 'foxwiki',
'freebio': 'freebio',
'freebsdman': 'freebsdman',
'freeculturewiki': 'freeculturewiki',
'freedomdefined': 'freedomdefined',
'freefeel': 'freefeel',
'freekiwiki': 'freekiwiki',
'ganfyd': 'ganfyd',
'gausswiki': 'gausswiki',
'gentoo-wiki': 'gentoo',
'genwiki': 'genwiki',
'globalvoices': 'globalvoices',
'glossarwiki': 'glossarwiki',
'glossarywiki': 'glossarywiki',
'golem': 'golem',
'google': 'google',
'googledefine': 'googledefine',
'googlegroups': 'googlegroups',
'gotamac': 'gotamac',
'greatlakeswiki': 'greatlakeswiki',
'guildwiki': 'guildwiki',
'gutenberg': 'gutenberg',
'gutenbergwiki': 'gutenbergwiki',
'h2wiki': 'h2wiki',
'hammondwiki': 'hammondwiki',
'heroeswiki': 'heroeswiki',
'herzkinderwiki': 'herzkinderwiki',
'hkmule': 'hkmule',
'holshamtraders': 'holshamtraders',
'hrfwiki': 'hrfwiki',
'hrwiki': 'hrwiki',
'humancell': 'humancell',
'hupwiki': 'hupwiki',
'imdbcharacter': 'imdbcharacter',
'imdbcompany': 'imdbcompany',
'imdbname': 'imdbname',
'imdbtitle': 'imdbtitle',
'incubator': 'incubator',
'infoanarchy': 'infoanarchy',
'infosecpedia': 'infosecpedia',
'infosphere': 'infosphere',
'iso639-3': 'iso639-3',
'iuridictum': 'iuridictum',
'jameshoward': 'jameshoward',
'javanet': 'javanet',
'javapedia': 'javapedia',
'jefo': 'jefo',
'jiniwiki': 'jiniwiki',
'jspwiki': 'jspwiki',
'jstor': 'jstor',
'kamelo': 'kamelo',
'karlsruhe': 'karlsruhe',
'kerimwiki': 'kerimwiki',
'kinowiki': 'kinowiki',
'kmwiki': 'kmwiki',
'kontuwiki': 'kontuwiki',
'koslarwiki': 'koslarwiki',
'kpopwiki': 'kpopwiki',
'linguistlist': 'linguistlist',
'linuxwiki': 'linuxwiki',
'linuxwikide': 'linuxwikide',
'liswiki': 'liswiki',
'literateprograms': 'literateprograms',
'livepedia': 'livepedia',
'lojban': 'lojban',
'lostpedia': 'lostpedia',
'lqwiki': 'lqwiki',
'lugkr': 'lugkr',
'luxo': 'luxo',
'lyricwiki': 'lyricwiki',
'm': 'meta',
'm-w': 'm-w',
'mail': 'mail',
'mailarchive': 'mailarchive',
'mariowiki': 'mariowiki',
'marveldatabase': 'marveldatabase',
'meatball': 'meatball',
'mediazilla': 'mediazilla',
'memoryalpha': 'memoryalpha',
'meta': 'meta',
'metawiki': 'metawiki',
'metawikipedia': 'metawikipedia',
'mineralienatlas': 'mineralienatlas',
'moinmoin': 'moinmoin',
'monstropedia': 'monstropedia',
'mosapedia': 'mosapedia',
'mozcom': 'mozcom',
'mozillawiki': 'mozillawiki',
'mozillazinekb': 'mozillazinekb',
'musicbrainz': 'musicbrainz',
'mw': 'mw',
'mwod': 'mwod',
'mwot': 'mwot',
'n': 'wikinews',
'netvillage': 'netvillage',
'nkcells': 'nkcells',
'nomcom': 'nomcom',
'nosmoke': 'nosmoke',
'nost': 'nost',
'oeis': 'oeis',
'oldwikisource': 'oldwikisource',
'olpc': 'olpc',
'onelook': 'onelook',
'openfacts': 'openfacts',
'openstreetmap': 'openstreetmap',
'openwetware': 'openwetware',
'openwiki': 'openwiki',
'opera7wiki': 'opera7wiki',
'organicdesign': 'organicdesign',
'orgpatterns': 'orgpatterns',
'orthodoxwiki': 'orthodoxwiki',
'osi reference model': 'osi reference model',
'otrs': 'otrs',
'otrswiki': 'otrswiki',
'ourmedia': 'ourmedia',
'paganwiki': 'paganwiki',
'panawiki': 'panawiki',
'pangalacticorg': 'pangalacticorg',
'patwiki': 'patwiki',
'perlconfwiki': 'perlconfwiki',
'perlnet': 'perlnet',
'personaltelco': 'personaltelco',
'phpwiki': 'phpwiki',
'phwiki': 'phwiki',
'planetmath': 'planetmath',
'pmeg': 'pmeg',
'pmwiki': 'pmwiki',
'psycle': 'psycle',
'purlnet': 'purlnet',
'pythoninfo': 'pythoninfo',
'pythonwiki': 'pythonwiki',
'pywiki': 'pywiki',
'q': 'wikiquote',
'qcwiki': 'qcwiki',
'quality': 'quality',
'qwiki': 'qwiki',
'r3000': 'r3000',
'raec': 'raec',
'rakwiki': 'rakwiki',
'reuterswiki': 'reuterswiki',
'rev': 'rev',
'revo': 'revo',
'rfc': 'rfc',
'rheinneckar': 'rheinneckar',
'robowiki': 'robowiki',
'rowiki': 'rowiki',
's': 'wikisource',
's23wiki': 's23wiki',
'scholar': 'scholar',
'schoolswp': 'schoolswp',
'scores': 'scores',
'scoutwiki': 'scoutwiki',
'scramble': 'scramble',
'seapig': 'seapig',
'seattlewiki': 'seattlewiki',
'seattlewireless': 'seattlewireless',
'senseislibrary': 'senseislibrary',
'silcode': 'silcode',
'slashdot': 'slashdot',
'slwiki': 'slwiki',
'smikipedia': 'smikipedia',
'sourceforge': 'sourceforge',
'spcom': 'spcom',
'species': 'species',
'squeak': 'squeak',
'stable': 'stable',
'strategywiki': 'strategywiki',
'sulutil': 'sulutil',
'susning': 'susning',
'svgwiki': 'svgwiki',
'svn': 'svn',
'swinbrain': 'swinbrain',
'swingwiki': 'swingwiki',
'swtrain': 'swtrain',
'tabwiki': 'tabwiki',
'takipedia': 'takipedia',
'tavi': 'tavi',
'tclerswiki': 'tclerswiki',
'technorati': 'technorati',
'tejo': 'tejo',
'tesoltaiwan': 'tesoltaiwan',
'testwiki': 'testwiki',
'thelemapedia': 'thelemapedia',
'theopedia': 'theopedia',
'theppn': 'theppn',
'thinkwiki': 'thinkwiki',
'tibiawiki': 'tibiawiki',
'ticket': 'ticket',
'tmbw': 'tmbw',
'tmnet': 'tmnet',
'tmwiki': 'tmwiki',
'tokyonights': 'tokyonights',
'tools': 'tools',
'translatewiki': 'translatewiki',
'trash!italia': 'trash!italia',
'tswiki': 'tswiki',
'turismo': 'turismo',
'tviv': 'tviv',
'tvtropes': 'tvtropes',
'twiki': 'twiki',
'twistedwiki': 'twistedwiki',
'tyvawiki': 'tyvawiki',
'uncyclopedia': 'uncyclopedia',
'unreal': 'unreal',
'urbandict': 'urbandict',
'usej': 'usej',
'usemod': 'usemod',
'v': 'wikiversity',
'valuewiki': 'valuewiki',
'veropedia': 'veropedia',
'vinismo': 'vinismo',
'vkol': 'vkol',
'vlos': 'vlos',
'voipinfo': 'voipinfo',
'voy': 'wikivoyage',
'w': 'wikipedia',
'warpedview': 'warpedview',
'webdevwikinl': 'webdevwikinl',
'webisodes': 'webisodes',
'webseitzwiki': 'webseitzwiki',
'wg': 'wg',
'wiki': 'wiki',
'wikia': 'wikia',
'wikianso': 'wikianso',
'wikiasite': 'wikiasite',
'wikible': 'wikible',
'wikibooks': 'wikibooks',
'wikichat': 'wikichat',
'wikichristian': 'wikichristian',
'wikicities': 'wikicities',
'wikicity': 'wikicity',
'wikif1': 'wikif1',
'wikifur': 'wikifur',
'wikihow': 'wikihow',
'wikiindex': 'wikiindex',
'wikilemon': 'wikilemon',
'wikilivres': 'wikilivres',
'wikimac-de': 'wikimac-de',
'wikimac-fr': 'wikimac-fr',
'wikimedia': 'wikimedia',
'wikinews': 'wikinews',
'wikinfo': 'wikinfo',
'wikinurse': 'wikinurse',
'wikinvest': 'wikinvest',
'wikipaltz': 'wikipaltz',
'wikipedia': 'wikipedia',
'wikipediawikipedia': 'wikipediawikipedia',
'wikiquote': 'wikiquote',
'wikireason': 'wikireason',
'wikischool': 'wikischool',
'wikisophia': 'wikisophia',
'wikisource': 'wikisource',
'wikispecies': 'wikispecies',
'wikispot': 'wikispot',
'wikiti': 'wikiti',
'wikitravel': 'wikitravel',
'wikitree': 'wikitree',
'wikiversity': 'wikiversity',
'wikiwikiweb': 'wikiwikiweb',
'wikt': 'wiktionary',
'wiktionary': 'wiktionary',
'wipipedia': 'wipipedia',
'wlug': 'wlug',
'wm2005': 'wm2005',
'wm2006': 'wm2006',
'wm2007': 'wm2007',
'wm2008': 'wm2008',
'wm2009': 'wm2009',
'wm2010': 'wm2010',
'wmania': 'wmania',
'wmcz': 'wmcz',
'wmf': 'wmf',
'wmrs': 'wmrs',
'wmse': 'wmse',
'wookieepedia': 'wookieepedia',
'world66': 'world66',
'wowwiki': 'wowwiki',
'wqy': 'wqy',
'wurmpedia': 'wurmpedia',
'wznan': 'wznan',
'xboxic': 'xboxic',
'zh-cfr': 'zh-cfr',
'zrhwiki': 'zrhwiki',
'zum': 'zum',
'zwiki': 'zwiki',
'zzz wiki': 'zzz wiki',
}
# A list of category redirect template names in different languages
self.category_redirect_templates = {
'_default': []
}
# A list of languages that use hard (instead of soft) category redirects
self.use_hard_category_redirects = []
# A list of disambiguation template names in different languages
self.disambiguationTemplates = {
'_default': []
}
# A list of projects that share cross-project sessions.
self.cross_projects = []
# A list with the name for cross-project cookies.
# default for wikimedia centralAuth extensions.
self.cross_projects_cookies = ['centralauth_Session',
'centralauth_Token',
'centralauth_User']
self.cross_projects_cookie_username = 'centralauth_User'
# A list with the name in the cross-language flag permissions
self.cross_allowed = []
# A list with the name of the category containing disambiguation
# pages for the various languages. Only one category per language,
# and without the namespace, so add things like:
# 'en': "Disambiguation"
self.disambcatname = {}
# DEPRECATED, stores the code of the site which have a case sensitive
# main namespace. Use the Namespace given from the Site instead
self.nocapitalize = []
# attop is a list of languages that prefer to have the interwiki
# links at the top of the page.
self.interwiki_attop = []
# on_one_line is a list of languages that want the interwiki links
# one-after-another on a single line
self.interwiki_on_one_line = []
# String used as separator between interwiki links and the text
self.interwiki_text_separator = config.line_separator * 2
# Similar for category
self.category_attop = []
# on_one_line is a list of languages that want the category links
# one-after-another on a single line
self.category_on_one_line = []
# String used as separator between category links and the text
self.category_text_separator = config.line_separator * 2
# When both at the bottom should categories come after interwikilinks?
# TODO: T86284 Needed on Wikia sites, as it uses the CategorySelect
# extension which puts categories last on all sites. TO BE DEPRECATED!
self.categories_last = []
# Which languages have a special order for putting interlanguage
# links, and what order is it? If a language is not in
# interwiki_putfirst, alphabetical order on language code is used.
# For languages that are in interwiki_putfirst, interwiki_putfirst
# is checked first, and languages are put in the order given there.
# All other languages are put after those, in code-alphabetical
# order.
self.interwiki_putfirst = {}
# Some families, e. g. commons and meta, are not multilingual and
# forward interlanguage links to another family (wikipedia).
# These families can set this variable to the name of the target
# family.
self.interwiki_forward = None
# Some families, e. g. wikipedia, receive forwarded interlanguage
# links from other families, e. g. incubator, commons, or meta.
# These families can set this variable to the names of their source
# families.
self.interwiki_forwarded_from = {}
# Which language codes no longer exist and by which language code
# should they be replaced. If for example the language with code xx:
# now should get code yy:, add {'xx':'yy'} to obsolete.
if not hasattr(self, 'interwiki_replacements'):
self.interwiki_replacements = {}
# Codes that should be removed, usually because the site has been
# taken down.
if not hasattr(self, 'interwiki_removals'):
self.interwiki_removals = []
# Language codes of the largest wikis. They should be roughly sorted
# by size.
self.languages_by_size = []
# Some languages belong to a group where the possibility is high that
# equivalent articles have identical titles among the group.
self.language_groups = {
# languages using the arabic script (incomplete)
'arab': [
'ar', 'arz', 'ps', 'sd', 'ur', 'bjn', 'ckb',
# languages using multiple scripts, including arabic
'kk', 'ku', 'tt', 'ug', 'pnb'
],
# languages that use chinese symbols
'chinese': [
'wuu', 'zh', 'zh-classical', 'zh-yue', 'gan', 'ii',
# languages using multiple/mixed scripts, including chinese
'ja', 'za'
],
# languages that use the cyrillic alphabet
'cyril': [
'ab', 'av', 'ba', 'be', 'be-x-old', 'bg', 'bxr', 'ce', 'cu',
'cv', 'kbd', 'koi', 'kv', 'ky', 'mk', 'lbe', 'mdf', 'mn', 'mo',
'myv', 'mhr', 'mrj', 'os', 'ru', 'rue', 'sah', 'tg', 'tk',
'udm', 'uk', 'xal',
# languages using multiple scripts, including cyrillic
'ha', 'kk', 'sh', 'sr', 'tt'
],
# languages that use a greek script
'grec': [
'el', 'grc', 'pnt'
# languages using multiple scripts, including greek
],
# languages that use the latin alphabet
'latin': [
'aa', 'ace', 'af', 'ak', 'als', 'an', 'ang', 'ast', 'ay', 'bar',
'bat-smg', 'bcl', 'bi', 'bm', 'br', 'bs', 'ca', 'cbk-zam',
'cdo', 'ceb', 'ch', 'cho', 'chy', 'co', 'crh', 'cs', 'csb',
'cy', 'da', 'de', 'diq', 'dsb', 'ee', 'eml', 'en', 'eo', 'es',
'et', 'eu', 'ext', 'ff', 'fi', 'fiu-vro', 'fj', 'fo', 'fr',
'frp', 'frr', 'fur', 'fy', 'ga', 'gag', 'gd', 'gl', 'gn', 'gv',
'hak', 'haw', 'hif', 'ho', 'hr', 'hsb', 'ht', 'hu', 'hz', 'ia',
'id', 'ie', 'ig', 'ik', 'ilo', 'io', 'is', 'it', 'jbo', 'jv',
'kaa', 'kab', 'kg', 'ki', 'kj', 'kl', 'kr', 'ksh', 'kw', 'la',
'lad', 'lb', 'lg', 'li', 'lij', 'lmo', 'ln', 'lt', 'ltg', 'lv',
'map-bms', 'mg', 'mh', 'mi', 'ms', 'mt', 'mus', 'mwl', 'na',
'nah', 'nap', 'nds', 'nds-nl', 'ng', 'nl', 'nn', 'no', 'nov',
'nrm', 'nv', 'ny', 'oc', 'om', 'pag', 'pam', 'pap', 'pcd',
'pdc', 'pfl', 'pih', 'pl', 'pms', 'pt', 'qu', 'rm', 'rn', 'ro',
'roa-rup', 'roa-tara', 'rw', 'sc', 'scn', 'sco', 'se', 'sg',
'simple', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'srn', 'ss', 'st',
'stq', 'su', 'sv', 'sw', 'szl', 'tet', 'tl', 'tn', 'to', 'tpi',
'tr', 'ts', 'tum', 'tw', 'ty', 'uz', 've', 'vec', 'vi', 'vls',
'vo', 'wa', 'war', 'wo', 'xh', 'yo', 'zea', 'zh-min-nan', 'zu',
# languages using multiple scripts, including latin
'az', 'chr', 'ckb', 'ha', 'iu', 'kk', 'ku', 'rmy', 'sh', 'sr',
'tt', 'ug', 'za'
],
# Scandinavian languages
'scand': [
'da', 'fo', 'is', 'nb', 'nn', 'no', 'sv'
],
}
# LDAP domain if your wiki uses LDAP authentication,
# https://www.mediawiki.org/wiki/Extension:LDAP_Authentication
self.ldapDomain = ()
# Allows crossnamespace interwiki linking.
# Lists the possible crossnamespaces combinations
# keys are originating NS
# values are dicts where:
# keys are the originating langcode, or _default
# values are dicts where:
# keys are the languages that can be linked to from the lang+ns, or
# '_default'; values are a list of namespace numbers
self.crossnamespace = collections.defaultdict(dict)
##
# Examples :
#
# Allowing linking to pt' 102 NS from any other lang' 0 NS is
#
# self.crossnamespace[0] = {
# '_default': { 'pt': [102]}
# }
#
# While allowing linking from pt' 102 NS to any other lang' = NS is
#
# self.crossnamespace[102] = {
# 'pt': { '_default': [0]}
# }
_families = {}
def __getattribute__(self, name):
    """
    Check if attribute is deprecated and warn accordingly.

    This is necessary as subclasses could prevent that message by using a
    class variable. Only penalize getting it because it must be set so that
    the backwards compatibility is still available.

    @param name: name of the attribute being looked up
    @type name: str
    """
    if name == 'nocapitalize':
        # Case sensitivity is now taken from the live site information.
        issue_deprecation_warning('nocapitalize',
                                  "APISite.siteinfo['case'] or "
                                  "Namespace.case == 'case-sensitive'", 2)
    elif name == 'known_families':
        issue_deprecation_warning('known_families',
                                  'APISite.interwiki(prefix)', 2)
    return super(Family, self).__getattribute__(name)
@staticmethod
@deprecated_args(fatal=None)
def load(fam=None):
    """Import the named family.

    Loaded families are cached in C{Family._families}, so each family
    module is imported at most once.

    @param fam: family name (if omitted, uses the configured default)
    @type fam: str
    @return: a Family instance configured for the named family.
    @raises UnknownFamily: family not known
    """
    if fam is None:
        fam = config.family
    # Family names become module names, so restrict them to safe chars.
    assert all(x in NAME_CHARACTERS for x in fam), \
        'Name of family must be ASCII character'
    if fam in Family._families:
        return Family._families[fam]
    if fam in config.family_files:
        family_file = config.family_files[fam]
        # A URL instead of a path means an auto-generated family.
        if family_file.startswith('http://') or family_file.startswith('https://'):
            myfamily = AutoFamily(fam, family_file)
            Family._families[fam] = myfamily
            return Family._families[fam]
    elif fam == 'lockwiki':
        raise UnknownFamily(
            "Family 'lockwiki' has been removed as it not a public wiki.\n"
            "You may install your own family file for this wiki, and a "
            "old family file may be found at:\n"
            "http://git.wikimedia.org/commitdiff/pywikibot%2Fcore.git/dfdc0c9150fa8e09829bb9d236")
    try:
        # Ignore warnings due to dots in family names.
        # TODO: use more specific filter, so that family classes can use
        # RuntimeWarning's while loading.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            mod = imp.load_source(fam, config.family_files[fam])
    except (ImportError, KeyError):
        raise UnknownFamily(u'Family %s does not exist' % fam)
    cls = mod.Family()
    if cls.name != fam:
        warn(u'Family name %s does not match family module name %s'
             % (cls.name, fam), FamilyMaintenanceWarning)
    # Family 'name' and the 'langs' codes must be ascii, and the
    # codes must be lower-case due to the Site loading algorithm.
    if not all(x in NAME_CHARACTERS for x in cls.name):
        warn(u'Family name %s contains non-ascii characters' % cls.name,
             FamilyMaintenanceWarning)
    # FIXME: wikisource uses code '-' for www.wikisource.org
    if not all(all(x in CODE_CHARACTERS for x in code) and
               (cls.name == 'wikisource' or code[0] != '-')
               for code in cls.langs.keys()):
        # Fix: the original passed the format string without interpolating
        # the family name, so the warning always showed a literal '%s'.
        warn(u'Family %s codes contains non-ascii characters' % cls.name,
             FamilyMaintenanceWarning)
    Family._families[fam] = cls
    return cls
@property
def iwkeys(self):
    """Return the language codes that interwiki links may point to."""
    target_family = self.interwiki_forward
    if target_family:
        return list(pywikibot.Family(target_family).langs.keys())
    return list(self.langs.keys())
def get_known_families(self, site):
    """Return the mapping of interwiki prefixes known to this family.

    @param site: the site the prefixes apply to (unused by this
        default implementation)
    """
    # Reading 'known_families' goes through __getattribute__, which
    # emits a deprecation warning for that attribute name.
    return self.known_families
def linktrail(self, code, fallback='_default'):
    """Return regex for trailing chars displayed as part of a link.

    Returns a string, not a compiled regular expression object.

    This reads from the family file, and ''not'' from
    [[MediaWiki:Linktrail]], because the MW software currently uses a
    built-in linktrail from its message files and ignores the wiki
    value.
    """
    # Try the requested code first; fall back when configured.
    try:
        return self.linktrails[code]
    except KeyError:
        if not fallback:
            raise KeyError(
                "ERROR: linktrail in language %(language_code)s unknown"
                % {'language_code': code})
        return self.linktrails[fallback]
def category_redirects(self, code, fallback='_default'):
    """Return the category redirect template titles for *code*.

    The titles are computed lazily by L{get_cr_templates} and cached
    on the instance.
    """
    cache = getattr(self, '_catredirtemplates', {})
    if code not in cache:
        self.get_cr_templates(code, fallback)
    if code not in self._catredirtemplates:
        raise KeyError("ERROR: title for category redirect template in "
                       "language '%s' unknown" % code)
    return self._catredirtemplates[code]
def get_cr_templates(self, code, fallback):
    """Build the category redirect template cache entry for *code*.

    Populates C{self._catredirtemplates[code]} with the titles of the
    primary category redirect template and every template redirecting
    to it on the live wiki.
    """
    if not hasattr(self, "_catredirtemplates"):
        self._catredirtemplates = {}
    if code in self.category_redirect_templates:
        cr_template_list = self.category_redirect_templates[code]
        cr_list = list(self.category_redirect_templates[code])
    else:
        cr_template_list = self.category_redirect_templates[fallback]
        # NOTE(review): fallback titles are only used to query the wiki
        # below; they are not copied into the result list — presumably
        # deliberate, confirm before changing.
        cr_list = []
    if cr_template_list:
        cr_template = cr_template_list[0]
        # start with list of category redirect templates from family file
        cr_page = pywikibot.Page(pywikibot.Site(code, self),
                                 "Template:" + cr_template)
        # retrieve all redirects to primary template from API,
        # add any that are not already on the list
        for t in cr_page.backlinks(filterRedirects=True, namespaces=10):
            newtitle = t.title(withNamespace=False)
            if newtitle not in cr_list:
                cr_list.append(newtitle)
    self._catredirtemplates[code] = cr_list
def disambig(self, code, fallback='_default'):
    """Return the disambiguation template titles for *code*."""
    templates = self.disambiguationTemplates
    if code in templates:
        return templates[code]
    if fallback:
        return templates[fallback]
    raise KeyError(
        "ERROR: title for disambig template in language %s unknown"
        % code)
# Methods
def protocol(self, code):
    """Return the transport scheme used to reach this family's sites.

    Subclasses may override this to return 'https'; no other schemes
    are supported.

    @param code: language code
    @type code: string
    @return: protocol that this family uses
    @rtype: string
    """
    return 'http'
def ignore_certificate_error(self, code):
    """Return whether an invalid HTTPS certificate should be tolerated.

    @param code: language code
    @type code: string
    @return: flag to allow access if certificate has an error.
    @rtype: bool
    """
    return False
def hostname(self, code):
    """Return the domain serving plain-HTTP requests for *code*."""
    return self.langs[code]
def ssl_hostname(self, code):
    """Return the domain for SSL connections (plain hostname by default)."""
    return self.hostname(code)
def scriptpath(self, code):
    """Return the prefix used to locate scripts on this wiki.

    This is the value displayed when you enter {{SCRIPTPATH}} on a
    wiki page (often displayed at [[Help:Variables]] if the wiki has
    copied the master help page correctly).

    The default is the value used on Wikimedia Foundation wikis; any
    wiki using a different value must override it in its family file.
    """
    return '/w'
def ssl_pathprefix(self, code):
    """Return the path prefix for secure HTTP access.

    Override this ONLY if the wiki family requires a path prefix.
    """
    return ''
def base_url(self, code, uri):
    """Join *uri* onto the absolute root URL of the wiki for *code*.

    For https the SSL hostname and path prefix are applied; otherwise
    the plain hostname is used.
    """
    scheme = self.protocol(code)
    if scheme == 'https':
        authority = self.ssl_hostname(code)
        uri = self.ssl_pathprefix(code) + uri
    else:
        authority = self.hostname(code)
    return urlparse.urljoin('%s://%s' % (scheme, authority), uri)
def path(self, code):
    """Return the path to index.php relative to the host root."""
    return self.scriptpath(code) + '/index.php'
def querypath(self, code):
    """Return the path to query.php relative to the host root."""
    return self.scriptpath(code) + '/query.php'
def apipath(self, code):
    """Return the path to api.php relative to the host root."""
    return self.scriptpath(code) + '/api.php'
# TODO: @deprecated('APISite.article_path')
# As soon as from_url does not need nicepath anymore
def nicepath(self, code):
    """Return the pretty article path prefix."""
    return '/wiki/'
def _get_path_regex(self, code):
    """
    Return a regex matching a site URL path.

    Matches either the index.php path (with an optional trailing
    slash) or the pretty article path.

    @return: regex string
    @rtype: unicode
    """
    index_part = re.escape(self.path(code) + '/')
    nice_part = re.escape(self.nicepath(code))
    return '(?:%s?|%s)' % (index_part, nice_part)
def _get_url_regex(self, code):
    """
    Return a regex matching a site URL.

    Regex match group 1 is the domain.  Protocol-relative '//' URLs
    are accepted too.  Does not make use of ssl_hostname or
    ssl_pathprefix.

    @return: regex string
    @rtype: unicode
    """
    domain = re.escape(self.hostname(code))
    return (r'(?:\/\/|%s\:\/\/)(%s)%s'
            % (self.protocol(code), domain, self._get_path_regex(code)))
def rcstream_host(self, code):
    """Raise because generic families provide no RCStream endpoint."""
    raise NotImplementedError("This family does not support RCStream")
@deprecated_args(name='title')
def get_address(self, code, title):
    """Return the path (with query string) fetching *title* directly.

    'redirect=no' suppresses redirect following on the wiki side.
    """
    return '%s?title=%s&redirect=no' % (self.path(code), title)
@deprecated('APISite.nice_get_address(title)')
def nice_get_address(self, code, title):
    """DEPRECATED. Return the pretty article path for *title*."""
    return '%s%s' % (self.nicepath(code), title)
def _get_regex_all(self):
    """
    Return a regex matching any site of this family, caching the result.

    It is using Family methods with code set to 'None' initially.
    That will raise KeyError if the Family methods use the code to
    lookup the correct value in a dictionary such as C{langs}.
    On KeyError, it retries it with each key from C{langs}.

    @return: regex string
    @rtype: unicode
    """
    if hasattr(self, '_regex_all'):
        return self._regex_all
    try:
        # Fast path: no URL component depends on the code.
        self._regex_all = self._get_url_regex(None)
        return self._regex_all
    except KeyError:
        # Probably automatically generated family
        pass
    # If there is only one code, use it.
    if len(self.langs) == 1:
        code = next(iter(self.langs.keys()))
        self._regex_all = self._get_url_regex(code)
        return self._regex_all
    # Fix: the '\:' and '\/' sequences below were previously written in
    # non-raw strings, which are invalid escape sequences (deprecated
    # since Python 3.6); raw strings keep the same runtime value.
    try:
        protocol = self.protocol(None) + r'\:\/\/'
    except KeyError:
        protocol = None
    try:
        hostname = re.escape(self.hostname(None))
    except KeyError:
        hostname = None
    try:
        path = self._get_path_regex(None)
    except KeyError:
        path = None
    # If two or more of the three above varies, the regex cant be optimised
    none_count = [protocol, hostname, path].count(None)
    if none_count > 1:
        self._regex_all = ('(?:%s)'
                           % '|'.join(self._get_url_regex(code)
                                      for code in self.langs.keys()))
        return self._regex_all
    if not protocol:
        protocols = set(self.protocol(code) + r'\:\/\/'
                        for code in self.langs.keys())
        protocol = '|'.join(protocols)
        # Allow protocol neutral '//'
        protocol = r'(?:\/\/|%s)' % protocol
    if not hostname:
        hostnames = set(re.escape(self.hostname(code))
                        for code in self.langs.keys())
        hostname = '|'.join(hostnames)
        # capture hostname
        hostname = '(' + hostname + ')'
    if not path:
        regexes = set(self._get_path_regex(code)
                      for code in self.langs.keys())
        path = '(?:%s)' % '|'.join(regexes)
    self._regex_all = protocol + hostname + path
    return self._regex_all
def from_url(self, url):
    """
    Return whether this family matches the given url.

    It must match URLs generated via C{self.langs} and
    L{Family.nice_get_address} or L{Family.path}.

    It ignores $1 in the url, and anything that follows it.

    @return: The language code of the url. None if that url is not from
        this family.
    @rtype: str or None
    @raises RuntimeError: Mismatch between Family langs dictionary and
        URL regex.
    """
    dollar = url.find('$1')
    if dollar >= 0:
        url = url[:dollar]
    match = re.match(self._get_regex_all(), url)
    if match is None:
        return None
    matched_domain = match.group(1)
    for code, domain in self.langs.items():
        if domain is None:
            warn('Family(%s): langs missing domain names' % self.name,
                 FamilyMaintenanceWarning)
        elif domain == matched_domain:
            return code
    # if domain was None, this will return the only possible code.
    if len(self.langs) == 1:
        return next(iter(self.langs))
    raise RuntimeError(
        'Family(%s): matched regex has not matched a domain in langs'
        % self.name)
def maximum_GET_length(self, code):
    """Return the maximum URL length for GET requests, from user config."""
    return config.maximum_GET_length
def dbName(self, code):
    """Return the name of the MySQL database: code followed by family name."""
    return '{0}{1}'.format(code, self.name)
# Which version of MediaWiki is used?
@deprecated('APISite.version()')
def version(self, code):
    """Return MediaWiki version number as a string.

    DEPRECATED in favour of APISite.version().

    Use L{pywikibot.tools.MediaWikiVersion} to compare version strings.

    @param code: language code (unused by this default implementation)
    """
    # Here we return the latest mw release for downloading
    return '1.25.1'
def force_version(self, code):
    """
    Return a manual version number, or None to trust siteinfo.

    The site normally uses the version number reported by the servers'
    siteinfo, but when that is problematic a subclass can return a
    non-empty string here representing another version number.

    For example, L{pywikibot.tools.MediaWikiVersion} treats version
    numbers ending with 'alpha', 'beta' or 'rc' as newer than any
    version ending with 'wmf<number>'; if that causes breakage this
    hook can supply a version number which does not.

    @return: A version number which can be parsed using
        L{pywikibot.tools.MediaWikiVersion}. If empty/None it uses the
        version returned via siteinfo.
    @rtype: str
    """
    return None
@deprecated("APISite.version()")
def versionnumber(self, code):
    """DEPRECATED, use version() instead.

    Use L{pywikibot.tools.MediaWikiVersion} to compare version strings.

    Return an int identifying MediaWiki version: 1000 * major +
    minor - 1000, i.e. 25 for version '1.25.1'.
    """
    # Fix: escape the separator dot; the previous r"(\d+).(\d+)" let
    # '.' match any character between the two numbers.
    R = re.compile(r"(\d+)\.(\d+)")
    M = R.search(self.version(code))
    if not M:
        # Version string malformatted; assume it should have been 1.10
        return 10
    return 1000 * int(M.group(1)) + int(M.group(2)) - 1000
def code2encoding(self, code):
    """Return the character encoding used by the wiki for *code*."""
    return 'utf-8'
def code2encodings(self, code):
    """Return list of historical encodings for a specific language Wiki."""
    return (self.code2encoding(code),)
# aliases
def encoding(self, code):
    """Return the encoding for a specific language Wiki (alias)."""
    return self.code2encoding(code)
def encodings(self, code):
    """Return historical encodings for a specific language Wiki (alias)."""
    return self.code2encodings(code)
def __eq__(self, other):
    """Compare self with other.

    If other is not a Family() object, try to create one.

    Families compare equal when their names match; a family without a
    name only equals itself.
    """
    if not isinstance(other, Family):
        # Interpret e.g. a family name string by loading that family.
        other = self.load(other)
    try:
        return self.name == other.name
    except AttributeError:
        return id(self) == id(other)
def __ne__(self, other):
    """Return True when self and other differ.

    NOTE(review): when __eq__ raises UnknownFamily (other names a
    family that cannot be loaded) this returns False, i.e. "not
    different" — that looks inverted; confirm the intent before
    changing.
    """
    try:
        return not self.__eq__(other)
    except UnknownFamily:
        return False
def __hash__(self):
    """Return a hash derived from the family name."""
    return hash(self.name)
def __str__(self):
    """Return the family name."""
    return self.name
def __repr__(self):
    """Return a constructor-like representation of the family."""
    return 'Family("{0}")'.format(self.name)
def shared_image_repository(self, code):
    """Return (family, code) of the shared image repository, if any."""
    return (None, None)
def shared_data_repository(self, code, transcluded=False):
    """Return (family, code) of the shared Wikibase repository, if any."""
    return (None, None)
@deprecated("Site.server_time()")
def server_time(self, code):
    """
    DEPRECATED, use Site.server_time instead.

    Return a datetime object representing server time.

    @param code: language code of the site to query
    """
    return pywikibot.Site(code, self).server_time()
def isPublic(self, code):
    """Return True when the wiki can be viewed without logging in."""
    return True
def post_get_convert(self, site, getText):
    """
    Do a conversion on the retrieved text from the Wiki.

    For example an X-conversion in Esperanto
    U{https://en.wikipedia.org/wiki/Esperanto_orthography#X-system}.

    The default implementation returns the text unchanged.
    """
    return getText
def pre_put_convert(self, site, putText):
    """
    Do a conversion on the text to insert on the Wiki.

    For example an X-conversion in Esperanto
    U{https://en.wikipedia.org/wiki/Esperanto_orthography#X-system}.

    The default implementation returns the text unchanged.
    """
    return putText
@property
def obsolete(self):
    """
    Old codes that are not part of the family.

    Interwiki replacements override removals for the same code.

    @return: mapping of old codes to new codes (or None)
    @rtype: dict
    """
    mapping = {code: None for code in self.interwiki_removals}
    mapping.update(self.interwiki_replacements)
    return FrozenDict(mapping,
                      'Family.obsolete not updatable; '
                      'use Family.interwiki_removals '
                      'and Family.interwiki_replacements')
@obsolete.setter
def obsolete(self, data):
    """Split obsolete dict into removals and replacements."""
    removals = [old for old, new in data.items() if new is None]
    self.interwiki_removals[:] = removals
    self.interwiki_replacements.clear()
    self.interwiki_replacements.update(
        (old, new) for old, new in data.items() if new is not None)
class WikimediaFamily(Family):

    """Class for all wikimedia families."""

    # Code mappings which are only an alias, and there is no 'old' wiki.
    # For all except 'nl_nds', subdomains do exist as a redirect, but that
    # should not be relied upon.
    code_aliases = {
        # Country aliases; see T87002
        'dk': 'da',  # Wikipedia, Wikibooks and Wiktionary only.
        'jp': 'ja',

        # Language aliases
        'nb': 'no',  # T86924

        # Incomplete language code change. T86915
        'minnan': 'zh-min-nan',
        'nan': 'zh-min-nan',

        # These two probably only apply to Wikipedia.
        # Server not found for the other projects.
        'zh-tw': 'zh',
        'zh-cn': 'zh',

        # miss-spelling
        'nl_nds': 'nl-nds',
    }

    # Not open for edits; stewards can still edit.
    closed_wikis = []
    # Completely removed
    removed_wikis = []

    # Mappings which should be in effect, even for
    # closed/removed wikis
    interwiki_replacement_overrides = {
        # Moldovan projects are closed, however
        # Romanian was to be the replacement.
        'mo': 'ro',
    }

    def __init__(self):
        """Constructor: set up CentralAuth cross-project list."""
        super(WikimediaFamily, self).__init__()

        # CentralAuth cross available projects.
        self.cross_projects = [
            'commons', 'incubator', 'mediawiki', 'meta', 'species', 'test',
            'wikibooks', 'wikidata', 'wikinews', 'wikipedia', 'wikiquote',
            'wikisource', 'wikiversity', 'wiktionary',
        ]

    @property
    def interwiki_removals(self):
        """Return codes of removed and closed wikis."""
        return frozenset(self.removed_wikis + self.closed_wikis)

    @property
    def interwiki_replacements(self):
        """Return mapping of aliased codes plus forced overrides."""
        rv = self.code_aliases.copy()
        rv.update(self.interwiki_replacement_overrides)
        return FrozenDict(rv)

    def shared_image_repository(self, code):
        """Return the shared image repository, Wikimedia Commons."""
        return ('commons', 'commons')

    def protocol(self, code):
        """Return 'https' as the protocol."""
        return 'https'

    def rcstream_host(self, code):
        """Return the host providing the recent-changes stream."""
        return 'stream.wikimedia.org'
class AutoFamily(Family):

    """Family that automatically loads the site configuration."""

    def __init__(self, name, url):
        """Constructor: derive the single language entry from *url*."""
        super(AutoFamily, self).__init__()
        self.name = name
        self.url = urlparse.urlparse(url)
        self.langs = {name: self.url.netloc}

    def protocol(self, code):
        """Return the protocol of the URL."""
        return self.url.scheme

    def scriptpath(self, code):
        """Extract the script path from the URL."""
        suffix = '/api.php'
        url_path = self.url.path
        if url_path.endswith(suffix):
            return url_path[:-len(suffix)]
        return super(AutoFamily, self).scriptpath(code)
| trishnaguha/pywikibot-core | pywikibot/family.py | Python | mit | 64,378 |
# -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from ade25.assetmanager.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
    """Test installation of ade25.assetmanager into Plone."""

    def setUp(self):
        """Custom shared utility setup for tests."""
        self.portal = self.layer['portal']
        self.installer = api.portal.get_tool('portal_quickinstaller')

    def test_product_installed(self):
        """Test if ade25.assetmanager is installed with portal_quickinstaller."""
        self.assertTrue(self.installer.isProductInstalled('ade25.assetmanager'))

    def test_uninstall(self):
        """Test if ade25.assetmanager is cleanly uninstalled."""
        self.installer.uninstallProducts(['ade25.assetmanager'])
        self.assertFalse(
            self.installer.isProductInstalled('ade25.assetmanager'))

    # browserlayer.xml
    def test_browserlayer(self):
        """Test that IAde25AssetmanagerLayer is registered."""
        from ade25.assetmanager.interfaces import IAde25AssetmanagerLayer
        from plone.browserlayer import utils
        # Fix: replaced the long-deprecated unittest alias failUnless
        # (removed in Python 3.12) with assertIn, which also gives a
        # clearer failure message than a bare boolean check.
        self.assertIn(IAde25AssetmanagerLayer, utils.registered_layers())
| ade25/ade25.assetmanager | ade25/assetmanager/tests/test_setup.py | Python | mit | 1,209 |
from stagecraft.apps.datasets.models import DataGroup, DataSet, DataType
from django.test import TestCase
from stagecraft.libs.mass_update import DataSetMassUpdate
from nose.tools import assert_equal
class TestDataSetMassUpdate(TestCase):
    """Tests for DataSetMassUpdate.update_bearer_token_for_data_type_or_group_name."""

    @classmethod
    def setUpClass(cls):
        # Fixtures: two data groups and two data types, with three datasets
        # covering the (group, type) combinations exercised below.
        cls.data_group1 = DataGroup.objects.create(name='datagroup1')
        cls.data_group2 = DataGroup.objects.create(name='datagroup2')
        cls.data_type1 = DataType.objects.create(name='datatype1')
        cls.data_type2 = DataType.objects.create(name='datatype2')
        cls.dataset_a = DataSet.objects.create(
            name='foo',
            data_group=cls.data_group1,
            bearer_token="abc123",
            data_type=cls.data_type1)
        cls.dataset_b = DataSet.objects.create(
            name='bar',
            data_group=cls.data_group2,
            bearer_token="def456",
            data_type=cls.data_type1)
        cls.dataset_c = DataSet.objects.create(
            name='baz',
            data_group=cls.data_group2,
            bearer_token="999999",
            data_type=cls.data_type2)

    @classmethod
    def tearDownClass(cls):
        pass

    def _reload_datasets(self):
        """Return datasets (a, b, c) freshly loaded from the database.

        Extracted helper: the three tests below previously triplicated
        these queries.
        """
        return (DataSet.objects.get(id=self.dataset_a.id),
                DataSet.objects.get(id=self.dataset_b.id),
                DataSet.objects.get(id=self.dataset_c.id))

    def test_update_bearer_token_by_date_type(self):
        # datatype1 matches datasets a and b only.
        new_bearer_token = "ghi789"
        query = {u'data_type': self.data_type1.name}
        number_updated = DataSetMassUpdate \
            .update_bearer_token_for_data_type_or_group_name(
                query, new_bearer_token)
        dataset_a, dataset_b, dataset_c = self._reload_datasets()
        assert_equal(number_updated, 2)
        assert_equal(dataset_a.bearer_token, new_bearer_token)
        assert_equal(dataset_b.bearer_token, new_bearer_token)
        assert_equal(dataset_c.bearer_token == new_bearer_token, False)

    def test_update_bearer_token_by_data_group(self):
        # datagroup2 matches datasets b and c only.
        new_bearer_token = "ghi789"
        query = {u'data_group': self.data_group2.name}
        number_updated = DataSetMassUpdate \
            .update_bearer_token_for_data_type_or_group_name(
                query, new_bearer_token)
        dataset_a, dataset_b, dataset_c = self._reload_datasets()
        assert_equal(number_updated, 2)
        assert_equal(dataset_a.bearer_token == new_bearer_token, False)
        assert_equal(dataset_b.bearer_token, new_bearer_token)
        assert_equal(dataset_c.bearer_token, new_bearer_token)

    def test_update_bearer_token_by_data_group_and_data_type(self):
        # The intersection (datatype1 AND datagroup2) is dataset b only.
        new_bearer_token = "ghi789"
        query = {
            u'data_type': self.data_type1.name,
            u'data_group': self.data_group2.name}
        number_updated = DataSetMassUpdate \
            .update_bearer_token_for_data_type_or_group_name(
                query, new_bearer_token)
        dataset_a, dataset_b, dataset_c = self._reload_datasets()
        assert_equal(number_updated, 1)
        assert_equal(dataset_a.bearer_token == new_bearer_token, False)
        assert_equal(dataset_b.bearer_token, new_bearer_token)
        assert_equal(dataset_c.bearer_token == new_bearer_token, False)
| alphagov/stagecraft | stagecraft/libs/mass_update/test_data_set_mass_update.py | Python | mit | 3,422 |
#!/usr/bin/env python
import glob
import os
import shlex
import sys
script_dir = os.path.dirname(__file__)
node_root = os.path.normpath(os.path.join(script_dir, os.pardir))
# Make the bundled GYP checkout importable before the 'import gyp' below.
sys.path.insert(0, os.path.join(node_root, 'tools', 'gyp', 'pylib'))
import gyp
# Directory within which we want all generated files (including Makefiles)
# to be written.
output_dir = os.path.join(os.path.abspath(node_root), 'out')
def run_gyp(args):
    # Run GYP with the given argument list; exit the process on failure.
    rc = gyp.main(args)
    if rc != 0:
        # Python 2 print statement: this script predates Python 3 support.
        print 'Error running GYP'
        sys.exit(rc)
if __name__ == '__main__':
    args = sys.argv[1:]

    # GYP bug.
    # On msvs it will crash if it gets an absolute path.
    # On Mac/make it will crash if it doesn't get an absolute path.
    if sys.platform == 'win32':
        args.append(os.path.join(node_root, 'node.gyp'))
        common_fn = os.path.join(node_root, 'common.gypi')
        options_fn = os.path.join(node_root, 'config.gypi')
        options_fips_fn = os.path.join(node_root, 'config_fips.gypi')
    else:
        args.append(os.path.join(os.path.abspath(node_root), 'node.gyp'))
        common_fn = os.path.join(os.path.abspath(node_root), 'common.gypi')
        options_fn = os.path.join(os.path.abspath(node_root), 'config.gypi')
        options_fips_fn = os.path.join(os.path.abspath(node_root), 'config_fips.gypi')

    # Include optional .gypi files only when they exist on disk.
    if os.path.exists(common_fn):
        args.extend(['-I', common_fn])
    if os.path.exists(options_fn):
        args.extend(['-I', options_fn])
    if os.path.exists(options_fips_fn):
        args.extend(['-I', options_fips_fn])

    args.append('--depth=' + node_root)

    # There's a bug with windows which doesn't allow this feature.
    if sys.platform != 'win32' and 'ninja' not in args:
        # Tell gyp to write the Makefiles into output_dir
        args.extend(['--generator-output', output_dir])

        # Tell make to write its output into the same dir
        args.extend(['-Goutput_dir=' + output_dir])

    # Node is built as a static library plus a thin executable.
    args.append('-Dcomponent=static_library')
    args.append('-Dlibrary=static_library')

    gyp_args = list(args)
    run_gyp(gyp_args)
| dreamllq/node | tools/gyp_node.py | Python | apache-2.0 | 1,983 |
# Copyright 2021 The Google Earth Engine Community Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START earthengine__apidocs__ee_dictionary_aside]
# A dictionary (e.g. results of ee.Image.reduceRegion of an S2 image).
# A dictionary (e.g. results of ee.Image.reduceRegion of an S2 image).
dic = {
    'B1': 182,
    'B2': 219,
    'B3': 443
}


def print_dic(dic):
    """Prints the dictionary."""
    # The parameter shadows the module-level 'dic'; aside() passes the
    # ee.Dictionary object in. getInfo() presumably fetches the evaluated
    # value from the Earth Engine servers — standard EE client behavior.
    print('ee.Dictionary from client-side dictionary:', dic.getInfo())


# Print a message while the ee.Dictionary is being constructed.
ee_dic = ee.Dictionary(dic).aside(print_dic)
| google/earthengine-community | samples/python/apidocs/ee_dictionary_aside.py | Python | apache-2.0 | 1,056 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenient utility functions for exercises in Chapter 5 of Kane 1985."""
from __future__ import division
from sympy import Dummy, Matrix
from sympy import diff, expand, expand_trig, integrate, solve, symbols
from sympy import trigsimp
from sympy.physics.mechanics import ReferenceFrame, Point, Particle, RigidBody
from sympy.physics.mechanics import cross, dot, Vector
from sympy.physics.mechanics import MechanicsStrPrinter
from sympy.physics.mechanics import inertia_of_point_mass
def msprint(expr):
    """Return *expr* formatted with sympy's mechanics string printer."""
    pr = MechanicsStrPrinter()
    return pr.doprint(expr)
def subs(x, *args, **kwargs):
if x == 0:
return x
if not hasattr(x, 'subs'):
if hasattr(x, '__iter__'):
return map(lambda x: subs(x, *args, **kwargs), x)
return x.subs(*args, **kwargs).doit()
class PartialVelocity(dict):
    """Mapping of points/frames to their partial velocities.

    Keys are Point/ReferenceFrame objects; values are dicts mapping each
    generalized speed to the corresponding partial velocity vector. The
    reference frame and generalized-speed list used to build the partials
    are kept as read-only properties.
    """

    def __init__(self, frame, ulist, *args, **kwargs):
        self._set_frame(frame)
        self._set_ulist(ulist)
        dict.__init__(self, *args, **kwargs)

    def _set_frame(self, f):
        # Partial velocities only make sense relative to a real frame.
        if not isinstance(f, ReferenceFrame):
            raise TypeError(
                '{0} is not an instance of ReferenceFrame'.format(f))
        self._frame = f

    def _set_ulist(self, u):
        if not isinstance(u, list):
            # BUG FIX: the original formatted the undefined name 'f' here,
            # which raised NameError instead of the intended TypeError.
            raise TypeError(
                '{0} is not an instance of list'.format(u))
        self._ulist = u

    @property
    def frame(self):
        """ReferenceFrame the partial velocities are taken in."""
        return self._frame

    @property
    def ulist(self):
        """List of generalized speeds used to form the partials."""
        return self._ulist
def partial_velocities(system, generalized_speeds, frame,
                       kde_map=None, constraint_map=None, express_frame=None):
    """Return a PartialVelocity mapping each point/frame in *system* to its
    partial (angular) velocities with respect to *generalized_speeds*.

    'kde_map'/'constraint_map' (optional) are substitution dicts applied to
    each velocity before differentiation; 'express_frame' (optional) is the
    frame the partials are expressed in (defaults to *frame*).
    """
    partials = PartialVelocity(frame, generalized_speeds)
    if express_frame is None:
        express_frame = frame
    for p in system:
        if p in partials:
            continue
        # Points contribute translational velocity; frames contribute
        # angular velocity.
        if isinstance(p, Point):
            v = p.vel(frame)
        elif isinstance(p, ReferenceFrame):
            v = p.ang_vel_in(frame)
        else:
            raise TypeError('A Point or ReferenceFrame must be supplied.')
        if kde_map is not None:
            v = subs(v, kde_map)
        if constraint_map is not None:
            v = subs(v, constraint_map)
        v_r_p = {}
        for u in generalized_speeds:
            # Zero velocity yields an empty Vector rather than scalar 0.
            v_r_p[u] = Vector([]) if v == 0 else v.diff(u, express_frame)
        partials[p] = v_r_p
    return partials
def generalized_active_forces(partials, forces, uaux=None):
    """Return (Fr, ulist): the generalized active forces and the speeds.

    'partials' is a PartialVelocity; 'forces' is an iterable of
    (point_or_frame, force_or_torque[, integral_fn]) tuples; 'uaux'
    (optional) lists auxiliary speeds, which are zeroed out of the
    original active forces.
    """
    # use the same frame used in calculating partial velocities
    ulist = partials.ulist
    if uaux is not None:
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))
    Fr = [0] * len(ulist)
    for pf in forces:
        p = pf[0]  # first arg is point/rf
        f = pf[1]  # second arg is force/torque
        for i, u in enumerate(ulist):
            if partials[p][u] != 0 and f != 0:
                r = dot(partials[p][u], f)
                # if more than 2 args, 3rd is an integral function, where the
                # input is the integrand
                if len(pf) > 2:
                    r = pf[2](r)
                # auxilliary speeds have no effect on original active forces
                if uaux is not None and u not in uaux:
                    r = subs(r, uaux_zero)
                Fr[i] += r
    return Fr, ulist
def _calculate_T_star(rb, frame, kde_map, constraint_map, uaux):
    """Return the inertia torque T* for RigidBody *rb* in *frame*.

    T* = -alpha . I - (omega x I) . omega, with the angular velocity and
    acceleration rewritten via the supplied substitution maps.
    """
    # get central inertia
    # I_S/O = I_S/S* + I_S*/O
    I = rb.inertia[0] - inertia_of_point_mass(rb.mass,
            rb.masscenter.pos_from(rb.inertia[1]), rb.frame)
    alpha = rb.frame.ang_acc_in(frame)
    omega = rb.frame.ang_vel_in(frame)
    if uaux is not None:
        # auxilliary speeds do not change alpha, omega
        # use doit() to evaluate terms such as
        # Derivative(0, t) to 0.
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))
        alpha = subs(alpha, uaux_zero)
        omega = subs(omega, uaux_zero)
    if kde_map is not None:
        alpha = subs(alpha, kde_map)
        omega = subs(omega, kde_map)
    if constraint_map is not None:
        alpha = subs(alpha, constraint_map)
        omega = subs(omega, constraint_map)
    return -dot(alpha, I) - dot(cross(omega, I), omega)
def generalized_inertia_forces(partials, bodies,
                               kde_map=None, constraint_map=None,
                               uaux=None):
    """Return (Fr*, ulist): generalized inertia forces for *bodies*.

    'partials' is a PartialVelocity; 'bodies' contains RigidBody/Particle
    instances. RigidBodys additionally contribute an inertia torque term
    (via _calculate_T_star). 'uaux' lists auxiliary speeds, which are
    zeroed out of the original inertia forces.
    """
    # use the same frame used in calculating partial velocities
    ulist = partials.ulist
    frame = partials.frame
    if uaux is not None:
        uaux_zero = dict(zip(uaux, [0] * len(uaux)))
    Fr_star = [0] * len(ulist)
    for b in bodies:
        if isinstance(b, RigidBody):
            p = b.masscenter
            m = b.mass
        elif isinstance(b, Particle):
            p = b.point
            m = b.mass
        else:
            raise TypeError('{0} is not a RigidBody or Particle.'.format(b))
        # get acceleration of point
        a = p.acc(frame)
        if uaux is not None:
            # auxilliary speeds do not change a
            a = subs(a, uaux_zero)
        if kde_map is not None:
            a = subs(a, kde_map)
        if constraint_map is not None:
            a = subs(a, constraint_map)
        # get T* for RigidBodys
        if isinstance(b, RigidBody):
            T_star = _calculate_T_star(b, frame, kde_map, constraint_map, uaux)
        for i, u in enumerate(ulist):
            force_term = 0
            torque_term = 0
            # inertia force term
            force_term = dot(partials[p][u], -m*a)
            # add inertia torque term for RigidBodys
            if isinstance(b, RigidBody):
                torque_term = dot(partials[b.frame][u], T_star)
            # auxilliary speeds have no effect on original inertia forces
            if uaux is not None and u not in uaux:
                force_term = subs(force_term, uaux_zero)
                torque_term = subs(torque_term, uaux_zero)
            Fr_star[i] += force_term + torque_term
    return Fr_star, ulist
def _equivalent_derivatives(dV_dq_list, q):
dV_eq = []
for r in range(len(q)):
for s in range(r + 1, len(q)):
dV_eq.append(dV_dq_list[r].diff(q[s]) - dV_dq_list[s].diff(q[r]))
return dV_eq
def _f_variables(Fr, q, dV_eq, dV_dq):
    """Return (arg_list, non_arg_list): coordinates the unknown functions
    f1..fm may depend on (plus time t), and those they must not depend on.
    """
    Fr_qi_only = []
    non_arg = set()
    for i, fr in enumerate(Fr):
        dfrdqi = [j for j, x in enumerate(q) if fr.diff(x) != 0]
        # If generalized force is only a function of one generalized coordinate
        # save the indices of force, coordinate.
        if len(dfrdqi) == 1:
            Fr_qi_only.append((i, dfrdqi[0]))
    for fr_idx, qi_idx in Fr_qi_only:
        # If Fr = -∂V/∂qi, then fs-p is independent of qi.
        if Fr[fr_idx] - dV_eq[fr_idx] == dV_dq[qi_idx]:
            non_arg.add(q[qi_idx])
    return sorted(list(set(q) - non_arg)) + [symbols('t')], list(non_arg)
def kde_matrix(u, kde_map):
    """Returns the matrices W_sr, X_s which are defined by the equation
    q_dot = W_sr*u_r + X_s

    where q_dot is the vector [q1', q2', ..., qn'] and u_r is the
    vector [u1, u2, ..., un].

    The arg 'u' is u_r. Each element of q_dot is a key in 'kde_map' where
    the corresponding value is sum(W_sr[s, r] * u_r[r], (r, 1, n)) + X_s[s].
    """
    # NOTE: Python 2 idioms throughout (list-returning zip(), dict.iteritems(),
    # sorted(cmp=...)); this module targets Python 2.
    # Keys are filtered to time-derivatives and sorted for a stable row order.
    q_dot_values = Matrix(zip(*sorted(
        [(x, y) for x, y in kde_map.iteritems()
         if x.variables == (symbols('t'),)],
        cmp=lambda x, y: x[0].compare(y[0])))[1])
    W_sr = Matrix(map(lambda x: q_dot_values.T.diff(x), u)).T
    # X_s is whatever is left after removing the linear-in-u part.
    X_s = q_dot_values - W_sr*Matrix(u)
    return W_sr, X_s
def vc_matrix(u, vc_map):
    """Returns the matrices A_kr, B_k which are defined by the equation
    u_k = A_kr*u_r + B_k

    where u_k is the vector [up+1, ..., un] and u_r is the
    vector [u1, u2, ..., un].

    The arg 'u' is u_r. Each element of u_k is a key in 'vc_map' where
    the corresponding value is sum(A_kr[k, r] * u_r[r], (r, 1, n)) + B_k[k].
    """
    # Python 2 idioms (zip() list, iteritems, cmp=); rows sorted for
    # a stable ordering of the dependent speeds.
    vc_map_values = Matrix(zip(*sorted(
        [(x, y) for x, y in vc_map.iteritems()],
        cmp=lambda x, y: x[0].compare(y[0])))[1])
    A_kr = Matrix(map(lambda x: vc_map_values.T.diff(x), u)).T
    # B_k is the constraint remainder not linear in the speeds.
    B_k = vc_map_values - A_kr*Matrix(u)
    return A_kr, B_k
def generalized_active_forces_V(V, q, u, kde_map, vc_map=None):
    """Returns a list of the generalized active forces using equation 5.1.18
    from Kane 1985.

    'V' is a potential energy function for the system.
    'q' is a list of generalized coordinates.
    'u' is a list of the independent generalized speeds.
    'kde_map' is a dictionary with q dots as keys and the equivalent
    expressions in terms of q's and u's as values.
    'vc_map' is a dictionay with the dependent u's as keys and the expression
    in terms of independent u's as values.
    """
    n = len(q)
    p = len(u)
    m = n - p
    if vc_map is None:
        A_kr = Matrix.zeros(m, p)
    else:
        A_kr, _ = vc_matrix(u, vc_map)
        # NOTE(review): this appends the dependent speeds to the caller's
        # 'u' list in place — callers see the mutation.
        u += sorted(vc_map.keys(), cmp=lambda x, y: x.compare(y))
    W_sr, _ = kde_matrix(u, kde_map)
    dV_dq = map(lambda x: diff(V, x), q)
    Fr = Matrix.zeros(1, p)
    for s in range(n):
        # Eq. 5.1.18: Fr = -sum_s dV/dq_s * (W_sr + W_sk A_kr).
        Fr -= dV_dq[s] * (W_sr[s, :p] + W_sr[s, p:]*A_kr[:, :p])
    return Fr[:]
def function_from_partials(df_dq, q, zero_constants=False):
    """Returns a function given a list of partial derivatives of the
    function and a list of variables of which the partial derivative is
    given. For a function f(q1, ..., qn):

    'df_dq' is the list [∂ℱ/∂q1, ..., ∂ℱ/∂qn]
    'q' is the list [q1, ..., qn]
    'zero_constants' is True if zero should be used for integration constants.
    Symbols C, α1, ..., αn are used for integration constants.
    """
    alpha = symbols('α1:{0}'.format(len(q) + 1))
    f, zeta = symbols('C ζ')
    q_alpha = zip(q, alpha)
    for i, df_dqr in enumerate(df_dq):
        if hasattr(df_dqr, 'subs'):
            # Freeze later coordinates at their α values and integrate the
            # i-th coordinate from α_i to q_i (line integral, one leg per q).
            integrand = df_dqr.subs(dict(q_alpha[i + 1:])).subs(q[i], zeta)
        else:
            integrand = df_dqr
        f += integrate(expand_trig(integrand), (zeta, alpha[i], q[i]))
    if zero_constants:
        f = f.subs(dict(zip([symbols('C')] + list(alpha),
                            [0] * (len(q) + 1))))
    return f
def potential_energy(Fr, q, u, kde_map, vc_map=None):
    """Returns a potential energy function using the method from Section 5.1
    from Kane 1985.

    'Fr' is a list of the generalized active forces for the system.
    'q' is a list of generalized coordinates.
    'u' is a list of the independent generalized speeds.
    'kde_map' is a dictionary with q dots as keys and the equivalent
    expressions in terms of q's and u's as values.
    'vc_map' is a dictionay with the dependent u's as keys and the expression
    in terms of independent u's as values.

    Returns None (after printing a diagnostic) when no single-valued V
    consistent with Fr exists or when it cannot be found uniquely.
    """
    n = len(q)
    p = len(u)
    m = n - p
    if vc_map is not None:
        # NOTE(review): appends dependent speeds to the caller's 'u' in place.
        u += sorted(vc_map.keys(), cmp=lambda x, y: x.compare(y))
    # Unknown partials ∂V/∂q_s, solved for from the force balance below.
    dV_dq = symbols('∂V/∂q1:{0}'.format(n + 1))
    dV_eq = Matrix(Fr).T
    W_sr, _ = kde_matrix(u, kde_map)
    if vc_map is not None:
        A_kr, _ = vc_matrix(u, vc_map)
    else:
        A_kr = Matrix.zeros(m, p)
    for s in range(W_sr.shape[0]):
        dV_eq += dV_dq[s] * (W_sr[s, :p] + W_sr[s, p:]*A_kr[:, :p])
    if vc_map is not None:
        # With constraints, the last m partials become unknown functions
        # f1..fm of the admissible coordinates (and time).
        f_arg, non_arg = _f_variables(Fr, q, dV_eq, dV_dq)
        f = map(lambda x: x(*f_arg),
                symbols('f1:{0}'.format(m + 1)))
        dV_eq = subs(dV_eq, dict(zip(dV_dq[-m:], f)))
        dV_dq = dV_dq[:-m]
    dV_dq_map = solve(dV_eq, dV_dq)
    dV_dq_list = map(lambda x: dV_dq_map[x], dV_dq)
    if vc_map is None:
        #print('Checking ∂/∂qr(∂V/∂qs) = ∂/∂qs(∂V/∂qr) for all r, s '
        #      '= 1, ..., n.')
        dV_eq = _equivalent_derivatives(dV_dq_list, q)
        if dV_eq != [0] * (n*(n - 1)//2):
            rs = [(r, s) for r in range(n) for s in range(r + 1, n)]
            for (r, s), x in zip(rs, dV_eq):
                if trigsimp(expand(x)) != 0:
                    # Mixed partials do not commute: no potential exists.
                    print(('∂/∂q{0}(∂V/∂q{1}) != ∂/∂q{1}(∂V/∂q{0}). ' +
                           'V does NOT exist.').format(r + 1, s + 1))
                    print('∂/∂q{0}(∂V/∂q{1}) = {2}'.format(
                        r + 1, s + 1, dV_dq_list[r].diff(q[s])))
                    print('∂/∂q{1}(∂V/∂q{0}) = {2}'.format(
                        r + 1, s + 1, dV_dq_list[s].diff(q[r])))
                    return None
    else:
        dV_dq_list += f
        # Unable to take diff of 'fm.diff(qs)', replace with dummy symbols.
        dfdq = [Dummy('∂f{0}/∂q{1}'.format(i + 1, j + 1))
                for i in range(len(f)) for j in range(n)]
        dfdq_replace = lambda x: reduce(
            lambda y, z: y.replace(z[0], z[1]) if z[0] != 0 else y,
            zip([fm.diff(qs) for fm in f for qs in q], dfdq),
            x)
        dV_eq = map(dfdq_replace,
                    _equivalent_derivatives(dV_dq_list, q))
        X = Matrix(dfdq)
        Z = Matrix([map(lambda x: diff(dV_eqi, x), dfdq)
                    for dV_eqi in dV_eq])
        # NOTE: '/' is integer division under Python 2 for int operands.
        if Z.rank() == n * (n - 1) / 2:
            print('ρ == n(n - 1)/2')
            print('V may exist but cannot be found by this procedure.')
            return None
        Y = expand(Z*X - Matrix(dV_eq))
        ZI_rref, _ = Matrix.hstack(Z, Matrix.eye(Z.shape[0])).rref()
        # E is the matrix of elementary row operations that gives rref(Z).
        E = ZI_rref[:, Z.shape[1]:]
        f_eq = (E * Y)[Z.rank():]
        f_map = solve(f_eq, f)
        if sorted(f_map.keys(), cmp=lambda x, y: x.compare(y)) != f:
            print('Unable to solve for all f uniquely.')
            return None
        for k, v in f_map.iteritems():
            for qi in non_arg:
                if v.diff(qi) != 0:
                    print('{0} should not be a function of {1}'.format(k, qi))
                    return None
        dV_dq_list = map(trigsimp, (subs(dV_dq_list, f_map)))
    return function_from_partials(dV_dq_list, q)
def inertia_coefficient_contribution(body, partials, r, s):
    """Returns the contribution of a rigid body (or particle) to the inertia
    coefficient m_rs of a system.

    'body' is an instance of a RigidBody or Particle.
    'partials' is an instance of a PartialVelocity.
    'r' is the first generalized speed.
    's' is the second generlized speed.
    """
    if isinstance(body, Particle):
        m_rs = body.mass * dot(partials[body.point][r],
                               partials[body.point][s])
    elif isinstance(body, RigidBody):
        # Translational term (mass center) plus rotational term (central
        # inertia dyadic contracted with the angular partial velocities).
        m_rs = body.mass * dot(partials[body.masscenter][r],
                               partials[body.masscenter][s])
        m_rs += dot(dot(partials[body.frame][r], body.central_inertia),
                    partials[body.frame][s])
    else:
        raise TypeError(('{0} is not a RigidBody or Particle.').format(body))
    return m_rs
def inertia_coefficient_matrix(system, partials):
    """Returns the inertia coefficient matrix for a system of RigidBody's
    and Particle's. Each entry in the matrix, m_rs, is the inertia
    coefficient for generalized speeds r, s.

    'system' is a list where the elements are instances of RigidBody
    or Particle.
    'partials' is an instance of a PartialVelocity.

    Note: The order of the inertia coefficients is dependent on the order
    of the generalized speeds used when calculating partial velocities.
    """
    ulist = partials.ulist
    M = Matrix.zeros(len(ulist))
    # Only the upper triangle is computed; the matrix is symmetric.
    for i, r in enumerate(ulist):
        for j, s in enumerate(ulist[i:], i):
            for p in system:
                m_rs = inertia_coefficient_contribution(p, partials, r, s)
                M[i, j] += m_rs
            if i != j:
                M[j, i] = M[i, j]
    return M
def generalized_inertia_forces_K(K, q, u, kde_map, vc_map=None):
    """Returns a list of the generalized inertia forces using equation 5.6.6
    from Kane 1985.

    'K' is a potential energy function for the system.
    'q' is a list of generalized coordinates.
    'u' is a list of the independent generalized speeds.
    'kde_map' is a dictionary with q dots as keys and the equivalent
    expressions in terms of q's and u's as values.
    'vc_map' is a dictionay with the dependent u's as keys and the expression
    in terms of independent u's as values.
    """
    n = len(q)
    p = len(u)
    m = n - p
    t = symbols('t')
    if vc_map is None:
        A_kr = Matrix.zeros(m, p)
    else:
        A_kr, _ = vc_matrix(u, vc_map)
        # NOTE(review): appends dependent speeds to the caller's 'u' in place.
        u += sorted(vc_map.keys(), cmp=lambda x, y: x.compare(y))
    W_sr, _ = kde_matrix(u, kde_map)
    # Lagrange-style bracket: d/dt(∂K/∂q_s') − ∂K/∂q_s, rewritten in the u's.
    K_partial_term = [K.diff(q_s.diff(t)).diff(t) - K.diff(q_s) for q_s in q]
    K_partial_term = subs(K_partial_term, kde_map)
    if vc_map is not None:
        K_partial_term = subs(K_partial_term, vc_map)
    Fr_star = Matrix.zeros(1, p)
    for s in range(n):
        Fr_star -= K_partial_term[s] * (W_sr[s, :p] + W_sr[s, p:]*A_kr[:, :p])
    return Fr_star[:]
| skidzo/pydy | examples/Kane1985/Chapter5/util.py | Python | bsd-3-clause | 17,109 |
# Authors:
# Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pytest plugin for Declarative tests"""
def pytest_generate_tests(metafunc):
    """Generates Declarative tests.

    Parametrizes over ``metafunc.cls.tests``: each definition gets a
    zero-padded-index description, stored back on a copy of the definition
    under the 'nice' key, and used as the pytest test id.
    """
    if 'declarative_test_definition' in metafunc.fixturenames:
        # (Removed an unused local: the class name was computed but never used.)
        tests = []
        descriptions = []
        for i, test in enumerate(metafunc.cls.tests):
            if callable(test):
                description = '%s: %s' % (
                    str(i).zfill(4),
                    test.__name__,  # test is not a dict. pylint: disable=E1103
                )
                # NOTE(review): a plain function does not support the item
                # assignment below ('nice'); presumably callables used here
                # accept it — confirm against real Declarative suites.
            else:
                description = '%s: %s: %s' % (str(i).zfill(4),
                                              test['command'][0],
                                              test.get('desc', ''))
                # Copy so the shared class-level definition is not mutated.
                test = dict(test)
            test['nice'] = description
            tests.append(test)
            descriptions.append(description)
        metafunc.parametrize(
            ['index', 'declarative_test_definition'],
            enumerate(tests),
            ids=descriptions,
        )
| pspacek/freeipa | ipatests/pytest_plugins/declarative.py | Python | gpl-3.0 | 1,823 |
#! /usr/bin/env python
#
# IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import unittest
import sys
import yaml
from mock import MagicMock
sys.path.append("..")
sys.path.append(".")
from IM.VirtualMachine import VirtualMachine
from radl.radl_parse import parse_radl
from radl.radl import system, Feature
from IM.InfrastructureInfo import InfrastructureInfo
from IM.tosca.Tosca import Tosca
def read_file_as_string(file_name):
    """Return the contents of *file_name*, resolved relative to this module.

    Uses a context manager so the file handle is closed deterministically
    (the original ``open(...).read()`` leaked the handle).
    """
    tests_path = os.path.dirname(os.path.abspath(__file__))
    abs_file_path = os.path.join(tests_path, file_name)
    with open(abs_file_path, 'r') as f:
        return f.read()
class TestTosca(unittest.TestCase):
    """Integration-style tests for the Tosca -> RADL translation."""

    def __init__(self, *args):
        # Redundant override — simply delegates to the base constructor.
        unittest.TestCase.__init__(self, *args)

    def test_tosca_to_radl(self):
        """Test TOSCA RADL translation"""
        tosca_data = read_file_as_string('../files/tosca_long.yml')
        tosca = Tosca(tosca_data)
        _, radl = tosca.to_radl()
        radl = parse_radl(str(radl))
        net = radl.get_network_by_id('public_net')
        net1 = radl.get_network_by_id('public_net_1')
        net2 = radl.get_network_by_id('private_net')
        self.assertEqual(net2.getValue('provider_id'), 'provider_id')
        self.assertIn(net.getValue('provider_id'), ['pool_name', None])
        # The translator may assign the pool to either public net;
        # both branches below accept the two valid layouts.
        if net.getValue('provider_id') is None:
            self.assertEqual(net1.getValue('provider_id'), 'pool_name')
            self.assertIn('1:4/tcp', net.getValue("outports"))
            self.assertIn('80/tcp-80/tcp', net.getValue("outports"))
            self.assertIn('8080/tcp-8080/tcp', net.getValue("outports"))
            self.assertEqual(net1.getValue("outports"), '8080/tcp-8080/tcp')
        else:
            self.assertEqual(net.getValue('provider_id'), 'pool_name')
            self.assertEqual(net.getValue("outports"), '8080/tcp-8080/tcp')
            self.assertIn('1:4/tcp', net1.getValue("outports"))
            self.assertIn('80/tcp-80/tcp', net1.getValue("outports"))
            self.assertIn('8080/tcp-8080/tcp', net1.getValue("outports"))
        self.assertIn('10000/tcp-10000/tcp', net2.getValue("outports"))
        lrms_wn = radl.get_system_by_name('lrms_wn')
        self.assertEqual(lrms_wn.getValue('memory.size'), 2000000000)
        lrms_server = radl.get_system_by_name('lrms_server')
        self.assertEqual(lrms_server.getValue('memory.size'), 1000000000)
        self.assertEqual(lrms_server.getValue('net_interface.0.dns_name'), 'slurmserver')
        self.assertEqual("cloudid", radl.deploys[0].cloud_id)
        self.assertEqual("cloudid", radl.deploys[1].cloud_id)
        self.assertEqual("cloudid", radl.deploys[2].cloud_id)
        other_server = radl.get_system_by_name('other_server')
        self.assertEqual(other_server.getValue("availability_zone"), 'some_zone')
        self.assertEqual(lrms_wn.getValue("disk.1.size"), 10000000000)
        self.assertEqual(lrms_wn.getValue("disk.1.type"), 'ssd')
        self.assertEqual(lrms_wn.getValue("spot"), 'no')
        self.assertEqual(lrms_wn.getValue("instance_type"), 'some_type')
        lrms_front_end_conf = radl.get_configure_by_name('lrms_front_end_conf')
        conf = yaml.safe_load(lrms_front_end_conf.recipes)[0]
        self.assertEqual(conf['vars']['front_end_ip'],
                         "{{ hostvars[groups['lrms_server'][0]]['IM_NODE_PRIVATE_IP'] }}")
        self.assertEqual(conf['vars']['wn_ips'],
                         "{{ groups['lrms_wn']|map('extract', hostvars,'IM_NODE_PRIVATE_IP')|list"
                         " if 'lrms_wn' in groups else []}}")
        self.assertEqual([d.id for d in radl.deploys][2], 'lrms_wn')

    def test_tosca_get_outputs(self):
        """Test TOSCA get_outputs function"""
        tosca_data = read_file_as_string('../files/tosca_create.yml')
        tosca = Tosca(tosca_data)
        _, radl = tosca.to_radl()
        radl1 = radl.clone()
        radl1.systems = [radl.get_system_by_name('web_server')]
        # Simulate a deployed VM with a public IP and credentials set.
        radl1.systems[0].setValue("net_interface.1.ip", "158.42.1.1")
        radl1.systems[0].setValue("disk.0.os.credentials.username", "ubuntu")
        radl1.systems[0].setValue("disk.0.os.credentials.password", "pass")
        inf = InfrastructureInfo()
        vm = VirtualMachine(inf, "1", None, radl1, radl1, None)
        vm.requested_radl = radl1
        inf.vm_list = [vm]
        outputs = tosca.get_outputs(inf)
        self.assertEqual(outputs, {'server_url': ['158.42.1.1'],
                                   'server_creds': {'token_type': 'password',
                                                    'token': 'pass',
                                                    'user': 'ubuntu'}})

    def test_tosca_nets_to_radl(self):
        """Test TOSCA RADL translation with nets"""
        tosca_data = read_file_as_string('../files/tosca_nets.yml')
        tosca = Tosca(tosca_data)
        _, radl = tosca.to_radl()
        print(radl)
        radl = parse_radl(str(radl))
        net = radl.get_network_by_id('pub_network')
        net1 = radl.get_network_by_id('network1')
        self.assertEqual('1194/udp-1194/udp', net.getValue("outports"))
        self.assertEqual('192.168.0.0/16,vr1_compute', net1.getValue("router"))
        self.assertEqual('yes', net1.getValue("create"))
        self.assertEqual('192.168.10.0/24', net1.getValue("cidr"))
        lrms_wn = radl.get_system_by_name("lrms_wn")
        self.assertEqual("network1", lrms_wn.getValue("net_interface.0.connection"))
        lrms_server = radl.get_system_by_name("lrms_server")
        self.assertEqual("network1", lrms_server.getValue("net_interface.0.connection"))
        self.assertEqual("pub_network", lrms_server.getValue("net_interface.1.connection"))
        self.assertEqual("slurmserver", lrms_server.getValue("net_interface.0.dns_name"))

    def test_merge_yaml(self):
        """Test TOSCA merge two yamls"""
        # A list of requirement dicts is replaced wholesale by the override.
        a = {"wn_port": {"requirements": [{"binding": "lrms_wn"}, {"link": "network1"}]}}
        b = {"wn_port": {"requirements": [{"binding": "lrms_wn"}, {"link": "network2"}]}}
        c = Tosca._merge_yaml(a, b)
        self.assertEqual(c, b)
        a = {"requirements": [{"binding": "lrms_wn"}, {"link": "network1"}]}
        b = {"requirements": [{"binding": "lrms_wn"}, {"link": "network2"}, {"other": "value"}]}
        c = Tosca._merge_yaml(a, b)
        self.assertEqual(c, b)

    def test_tosca_add_hybrid1(self):
        """Adding nodes to an L2 hybrid deployment picks a new network."""
        tosca_data = read_file_as_string('../files/tosca_add_hybrid_l2.yml')
        tosca = Tosca(tosca_data)
        inf_info = MagicMock()
        vm1 = MagicMock()
        system1 = system("lrms_server", [Feature("disk.0.image.url", "=", "ost://cloud1.com/image1"),
                                         Feature("net_interface.0.connection", "=", "network1")])
        vm1.info.systems = [system1]
        vm2 = MagicMock()
        system2 = system("lrms_wn", [Feature("disk.0.image.url", "=", "ost://cloud1.com/image1"),
                                     Feature("net_interface.0.connection", "=", "network1")])
        vm2.info.systems = [system2]
        inf_info.get_vm_list_by_system_name.return_value = {"lrms_server": [vm1], "lrms_wn": [vm2]}
        _, radl = tosca.to_radl(inf_info)
        print(radl)
        radl = parse_radl(str(radl))
        lrms_wn = radl.get_system_by_name("lrms_wn")
        self.assertEqual("network2", lrms_wn.getValue("net_interface.0.connection"))

    def test_tosca_add_hybrid2(self):
        """Adding nodes on a second cloud keeps the cloud-specific net name."""
        tosca_data = read_file_as_string('../files/tosca_add_hybrid.yml')
        tosca = Tosca(tosca_data)
        inf_info = MagicMock()
        vm1 = MagicMock()
        system1 = system("lrms_server", [Feature("disk.0.image.url", "=", "ost://cloud1.com/image1"),
                                         Feature("net_interface.0.connection", "=", "private_net")])
        vm1.info.systems = [system1]
        vm2 = MagicMock()
        system2 = system("lrms_wn", [Feature("disk.0.image.url", "=", "ost://cloud3.com/image1"),
                                     Feature("net_interface.0.connection", "=", "private.cloud3.com")])
        vm2.info.systems = [system2]
        inf_info.get_vm_list_by_system_name.return_value = {"lrms_server": [vm1], "lrms_wn": [vm2]}
        net = MagicMock()
        net.isPublic.return_value = False
        inf_info.radl.get_network_by_id.return_value = net
        _, radl = tosca.to_radl(inf_info)
        print(radl)
        radl = parse_radl(str(radl))
        lrms_wn = radl.get_system_by_name("lrms_wn")
        self.assertEqual("private.cloud2.com", lrms_wn.getValue("net_interface.0.connection"))
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| indigo-dc/im | test/unit/Tosca.py | Python | gpl-3.0 | 9,301 |
"""
|▛▀▜|▙▄▟|▗▄▖|▝▀▘|
⌮⌬
○◉□▣◔◑◕●♾⚝☺☹✨✩❄❅❆✾⚛✗✓✔✘
"""
# import sys
from .termapp import TermApp
class Turtle:
    """Turtle state: integer position (x, y) and heading d in {0, 1, 2, 3}.

    Headings index the dx/dy step tables; with rows rendered top-down,
    d=0 steps +x, d=1 steps -y, d=2 steps -x, d=3 steps +y.
    """
    dx = [1, 0, -1, 0]
    dy = [0, -1, 0, 1]

    def __init__(self, x, y, d):
        self.jump(x, y, d)
        self.stack = []

    def turn(self, r):
        """Rotate the heading by r quarter turns (r may be negative)."""
        self.d = (self.d + r) % 4

    def move(self):
        """Advance one cell in the current heading."""
        self.x += Turtle.dx[self.d]
        self.y += Turtle.dy[self.d]

    def push(self):
        """Save the current (x, y, d) state on the stack."""
        self.stack.append((self.x, self.y, self.d))

    def pop(self):
        """Restore and discard the most recently pushed state."""
        # list.pop() replaces the original read-then-del pair.
        self.x, self.y, self.d = self.stack.pop()

    def jump(self, x, y, d):
        """Teleport to (x, y) with heading d."""
        self.x = x
        self.y = y
        self.d = d
class Canvas:
    """A toroidal grid of line-segment bitmasks drawn by a Turtle.

    Each cell holds a 4-bit mask, one bit per heading, marking which of
    its four sides carry a line; render() maps masks to box-drawing
    characters.
    """

    def __init__(self, width, height, turtle=None):
        self.width = width
        self.height = height
        # Either adopt the given (x, y, d) triple or start at the origin.
        self.turtle = Turtle(0, 0, 0) if turtle is None else Turtle(*turtle)
        self.field = [[0 for i in range(width)] for j in range(height)]

    def put(self, s):
        """Execute a turtle command string and return self.

        '+'/'-' turn, '['/']' push/pop state, uppercase letters draw a
        segment while moving, lowercase letters move without drawing.
        """
        t = self.turtle
        w, h = self.width, self.height
        for c in s:
            if c == '+':
                t.turn(1)
            elif c == '-':
                t.turn(-1)
            elif c == '[':
                t.push()
            elif c == ']':
                t.pop()
            elif c.isupper():
                # Mark the exit side of the current cell, step, then mark
                # the entry side of the next cell (coordinates wrap).
                self.field[t.y % h][t.x % w] |= (1 << t.d)
                t.move()
                self.field[t.y % h][t.x % w] |= (1 << ((t.d + 2) % 4))
            elif c.islower():
                t.move()
        return self

    def render(self):
        """Return the field as a list of strings of box-drawing glyphs."""
        chars = " ─│╰──╯┴│╭│├╮┬┤┼"
        return [''.join(chars[cell] for cell in row) for row in self.field]
class LSystem:
    """A Lindenmayer system: an axiom string plus per-character rules."""

    def __init__(self, axiom, rules):
        self.axiom = axiom
        self.rules = rules

    def compute(self, n):
        """Return the string produced by n parallel rewrites of the axiom."""
        state = self.axiom
        for _ in range(n):
            # Characters without a rule are copied through unchanged.
            state = ''.join(self.rules.get(symbol, symbol)
                            for symbol in state)
        return state
def make_frame(w, h):
    """Render a w-by-h rectangular frame and return its lines."""
    # Four sides — width, height, width, height — joined by '-' turns.
    horizontal = 'F' * (w - 1)
    vertical = 'F' * (h - 1)
    outline = '-'.join([horizontal, vertical, horizontal, vertical])
    frame = Canvas(w, h)
    frame.put(outline)
    return frame.render()
if __name__ == "__main__":
    # Demo: draw an L-system curve plus two framed panels on the terminal.
    with TermApp() as tout:
        width = tout.get_cols()
        # NOTE(review): 'height' is currently unused below.
        height = tout.get_lines()
        cx = Canvas(28, 24)
        cx.turtle.jump(13, 17, 0)
        cx.put(LSystem('F', {'F': 'FF-F-'}).compute(6))
        ls = cx.render()
        fr1 = make_frame(width - 18, 7)
        fr2 = make_frame(width - 18, 12)
        tout << tout.reset_color() << ['setfg', 140, 0, 40]
        tout << ['clear'] << ['cup', 1, 1]
        # 'sc'/'rc' save/restore the cursor; 'cud' moves down one line.
        for l in ls:
            tout << ['sc'] << l << ['rc'] << ['cud', 1]
        tout << ['setfg', 170, 110, 10] << ['cup', 10, 10] << 'λみ'
        tout << ['setfg', 50, 120, 90] << ['cup', 5, 16]
        for l in fr1:
            tout << ['sc'] << l << ['rc'] << ['cud', 1]
        tout << ['setfg', 50, 80, 120] << ['cud', 1]
        for l in fr2:
            tout << ['sc'] << l << ['rc'] << ['cud', 1]
        tout << ['setfg', 160, 220, 100] << ['cup', 3, 20]
        tout << ['sitm']
        tout << " Noodles 0.1.0 - worker console "
        tout << ['cup', 4, 20] << ['rev'] << ['setfg', 40, 40, 40] \
            << "▆"*(width - 24)
        tout << ['sgr0'] << ['op'] << ['setfg', 100, 160, 60] << ['sitm']
        tout << ['cup', 5, 19] << "(status)"
        tout << ['cup', 13, 19] << "(job list)"
        tout << ['cup', 25, 1] << ['op'] << ['sgr0']
| NLeSC/noodles | noodles/display/lines.py | Python | apache-2.0 | 3,855 |
import sys
# Python 2 hack: reload(sys) re-exposes setdefaultencoding (deleted by
# site.py) so the process-wide default string encoding can be forced to UTF-8.
# Set default encoding to UTF-8
reload(sys)
# noinspection PyUnresolvedReferences
sys.setdefaultencoding('utf-8')
import base64
import time
import json
import httplib
import traceback
import click
from flask import Flask, request, render_template, url_for, redirect, g
from flask.ext.cache import Cache
from flask.ext.login import LoginManager, current_user
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.assets import Environment, Bundle
from flask_ldap_login import LDAPLoginManager
from werkzeug.routing import BaseConverter
from werkzeug.exceptions import HTTPException
from sqlalchemy.ext.declarative import declarative_base
from .modules.search.models import Search
from .lib.util import to_canonical, remove_ext, mkdir_safe, gravatar_url, to_dict
from .lib.hook import HookModelMeta, HookMixin
from .lib.util import is_su, in_virtualenv
from .version import __version__
class Application(Flask):
    """Flask subclass adding reverse-proxy fixes, module discovery and
    automatic JSON responses."""

    def __call__(self, environ, start_response):
        """WSGI entry point: normalize the environ before dispatching.

        Strips a single trailing slash from non-root paths and honours the
        X-Scheme / X-Real-IP headers set by a fronting reverse proxy.
        """
        path_info = environ.get('PATH_INFO')
        if path_info and len(path_info) > 1 and path_info.endswith('/'):
            environ['PATH_INFO'] = path_info[:-1]
        scheme = environ.get('HTTP_X_SCHEME')
        if scheme:
            environ['wsgi.url_scheme'] = scheme
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            environ['REMOTE_ADDR'] = real_ip
        return super(Application, self).__call__(environ, start_response)

    def discover(self):
        """Import every module listed in config['MODULES'] and wire up its
        optional `init`, `views` (blueprint), `commands` (click) and
        `hooks` attributes."""
        import_name = 'realms.modules'
        fromlist = (
            'assets',
            'commands',
            'models',
            'views',
            'hooks'
        )
        start_time = time.time()  # only used by the commented-out timing print below
        __import__(import_name, fromlist=fromlist)
        for module_name in self.config['MODULES']:
            sources = __import__('%s.%s' % (import_name, module_name), fromlist=fromlist)
            if hasattr(sources, 'init'):
                sources.init(self)
            # Blueprint
            if hasattr(sources, 'views'):
                self.register_blueprint(sources.views.blueprint, url_prefix=self.config['RELATIVE_PATH'])
            # Click
            if hasattr(sources, 'commands'):
                cli.add_command(sources.commands.cli, name=module_name)
            # Hooks
            if hasattr(sources, 'hooks'):
                if hasattr(sources.hooks, 'before_request'):
                    self.before_request(sources.hooks.before_request)
                if hasattr(sources.hooks, 'before_first_request'):
                    self.before_first_request(sources.hooks.before_first_request)
        # print >> sys.stderr, ' * Ready in %.2fms' % (1000.0 * (time.time() - start_time))

    def make_response(self, rv):
        """Extend Flask's response building: None becomes 204 No Content and
        list/dict bodies are serialized to a JSON response."""
        if rv is None:
            rv = '', httplib.NO_CONTENT
        elif not isinstance(rv, tuple):
            rv = rv,
        rv = list(rv)
        if isinstance(rv[0], (list, dict)):
            rv[0] = self.response_class(json.dumps(rv[0]), mimetype='application/json')
        return super(Application, self).make_response(tuple(rv))
class Assets(Environment):
    """webassets Environment with per-extension default filters and outputs."""
    # Minify JS with rjsmin and CSS with cleancss unless overridden per bundle.
    default_filters = {'js': 'rjsmin', 'css': 'cleancss'}
    # %(version)s keys the output filename to the bundle contents (cache busting).
    default_output = {'js': 'assets/%(version)s.js', 'css': 'assets/%(version)s.css'}

    def register(self, name, *args, **kwargs):
        """Register a Bundle built from *args, inferring the asset type
        (and so the default filter/output) from the first file's extension."""
        ext = args[0].split('.')[-1]
        filters = kwargs.get('filters', self.default_filters[ext])
        output = kwargs.get('output', self.default_output[ext])
        return super(Assets, self).register(name, Bundle(*args, filters=filters, output=output))
class MyLDAPLoginManager(LDAPLoginManager):
    """LDAPLoginManager that requests all LDAP attributes."""
    @property
    def attrlist(self):
        # the parent method doesn't always work
        # Returning None makes the LDAP search fetch every attribute.
        return None
class RegexConverter(BaseConverter):
    """ Enables Regex matching on endpoints
    """
    def __init__(self, url_map, *items):
        super(RegexConverter, self).__init__(url_map)
        # The first (and only) converter argument is the raw regex pattern.
        self.regex = items[0]
def redirect_url(referrer=None):
    """Pick the post-action redirect target.

    Priority: the ``next`` query argument, then the given (or request)
    referrer, then the index page.
    """
    fallback = referrer or request.referrer
    return request.args.get('next') or fallback or url_for('index')
def error_handler(e):
    """Shared error handler: render errors as JSON for XHR/JSON clients
    and as an HTML error page otherwise.

    Returns a (body, status_code) tuple for Application.make_response.
    """
    try:
        if isinstance(e, HTTPException):
            status_code = e.code
            # Only expose the description when it was customized for this error.
            message = e.description if e.description != type(e).description else None
            tb = None
        else:
            status_code = httplib.INTERNAL_SERVER_ERROR
            message = None
            # Tracebacks are only shown to admins.
            # NOTE(review): assumes current_user always exposes an `admin`
            # attribute (including anonymous users) -- confirm.
            tb = traceback.format_exc() if current_user.admin else None
        if request.is_xhr or request.accept_mimetypes.best in ['application/json', 'text/javascript']:
            response = {
                'message': message,
                'traceback': tb
            }
        else:
            response = render_template('errors/error.html',
                                       title=httplib.responses[status_code],
                                       status_code=status_code,
                                       message=message,
                                       traceback=tb)
    except HTTPException as e2:
        # Rendering itself raised an HTTP error; handle that one instead.
        return error_handler(e2)
    return response, status_code
def create_app(config=None):
    """Application factory: build and fully configure a realms app.

    NOTE(review): the `config` parameter is currently unused; configuration
    always comes from 'realms.config'.
    """
    app = Application(__name__)
    app.config.from_object('realms.config')
    app.url_map.converters['regex'] = RegexConverter
    app.url_map.strict_slashes = False
    # Attach the module-level extension singletons to this app instance.
    login_manager.init_app(app)
    db.init_app(app)
    cache.init_app(app)
    assets.init_app(app)
    search.init_app(app)
    ldap.init_app(app)
    # Declarative base with hook support so model events can be intercepted.
    db.Model = declarative_base(metaclass=HookModelMeta, cls=HookMixin)
    # Route every 4xx/5xx status through the shared error handler.
    for status_code in httplib.responses:
        if status_code >= 400:
            app.register_error_handler(status_code, error_handler)
    @app.before_request
    def init_g():
        # Default asset lists, extended per-request by views.
        g.assets = dict(css=['main.css'], js=['main.js'])
    @app.template_filter('datetime')
    def _jinja2_filter_datetime(ts):
        return time.strftime('%b %d, %Y %I:%M %p', time.localtime(ts))
    @app.template_filter('b64encode')
    def _jinja2_filter_b64encode(s):
        # Strip base64 padding so the value is URL/filename safe.
        return base64.urlsafe_b64encode(s).rstrip("=")
    @app.errorhandler(404)
    def page_not_found(e):
        return render_template('errors/404.html'), 404
    if app.config.get('RELATIVE_PATH'):
        # When mounted under a prefix, send "/" to the configured root endpoint.
        @app.route("/")
        def root():
            return redirect(url_for(app.config.get('ROOT_ENDPOINT')))
    app.discover()
    # This will be removed at some point
    with app.app_context():
        if app.config.get('DB_URI'):
            db.metadata.create_all(db.get_engine(app))
    return app
# Init plugins here if possible
# Module-level extension singletons; bound to an app in create_app().
login_manager = LoginManager()
db = SQLAlchemy()
cache = Cache()
assets = Assets()
search = Search()
ldap = MyLDAPLoginManager()

# Front-end asset bundles, registered once at import time.
assets.register('main.js',
                'vendor/jquery/dist/jquery.js',
                'vendor/components-bootstrap/js/bootstrap.js',
                'vendor/handlebars/handlebars.js',
                'vendor/js-yaml/dist/js-yaml.js',
                'vendor/marked/lib/marked.js',
                'js/html-sanitizer-minified.js',  # don't minify?
                'vendor/highlightjs/highlight.pack.js',
                'vendor/parsleyjs/dist/parsley.js',
                'vendor/datatables/media/js/jquery.dataTables.js',
                'vendor/datatables-plugins/integration/bootstrap/3/dataTables.bootstrap.js',
                'js/hbs-helpers.js',
                'js/mdr.js',
                'js/main.js')

assets.register('main.css',
                'vendor/bootswatch-dist/css/bootstrap.css',
                'vendor/components-font-awesome/css/font-awesome.css',
                'vendor/highlightjs/styles/github.css',
                'vendor/datatables-plugins/integration/bootstrap/3/dataTables.bootstrap.css',
                'css/style.css')
from functools import update_wrapper


def with_appcontext(f):
    """Wraps a callback so that it's guaranteed to be executed with the
    script's application context. If callbacks are registered directly
    to the ``app.cli`` object then they are wrapped with this function
    by default unless it's disabled.
    """
    @click.pass_context
    def decorator(__ctx, *args, **kwargs):
        # A fresh application (and context) is created for every invocation.
        with create_app().app_context():
            return __ctx.invoke(f, *args, **kwargs)
    return update_wrapper(decorator, f)
class AppGroup(click.Group):
    """This works similar to a regular click :class:`~click.Group` but it
    changes the behavior of the :meth:`command` decorator so that it
    automatically wraps the functions in :func:`with_appcontext`.

    Not to be confused with :class:`FlaskGroup`.
    """
    def command(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
        unless it's disabled by passing ``with_appcontext=False``.
        """
        wrap_for_ctx = kwargs.pop('with_appcontext', True)
        def decorator(f):
            if wrap_for_ctx:
                f = with_appcontext(f)
            return click.Group.command(self, *args, **kwargs)(f)
        return decorator

    def group(self, *args, **kwargs):
        """This works exactly like the method of the same name on a regular
        :class:`click.Group` but it defaults the group class to
        :class:`AppGroup`.
        """
        # Subgroups inherit the app-context-wrapping behaviour by default.
        kwargs.setdefault('cls', AppGroup)
        return click.Group.group(self, *args, **kwargs)
# Root click group; per-module commands are attached to `cli` in discover().
flask_cli = AppGroup()


@flask_cli.group()
def cli():
    # Container group only -- no behaviour of its own.
    # (Intentionally no docstring: click would surface it as help text.)
    pass
| tanglu-org/tgl-realms | realms/__init__.py | Python | gpl-2.0 | 9,484 |
# -*- coding: utf-8 -*-
import attr
from navmazing import NavigateToAttribute
from widgetastic.widget import View, NoSuchElementException, Text
from widgetastic_manageiq import (
Accordion,
BreadCrumb,
ManageIQTree,
PaginationPane,
SummaryTable,
Table
)
from widgetastic_patternfly import (
Button,
Dropdown,
FlashMessages
)
from cfme.base.ui import BaseLoggedInPage
from cfme.common import WidgetasticTaggable, PolicyProfileAssignable
from cfme.exceptions import ItemNotFound
from cfme.utils.appliance.implementations.ui import CFMENavigateStep, navigator, navigate_to
from cfme.modeling.base import BaseCollection, BaseEntity
# Declarative widgetastic definitions: each attribute locates a widget on
# the corresponding CFME page.
class StorageManagerToolbar(View):
    """The toolbar on the Storage Manager or Provider page"""
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')


class StorageManagerDetailsToolbar(View):
    """The toolbar on the Storage Manager or Provider detail page"""
    reload = Button(title='Reload Current Display')
    configuration = Dropdown('Configuration')
    policy = Dropdown('Policy')
    monitoring = Dropdown('Monitoring')
    download = Button(title='Download summary in PDF format')


class StorageManagerEntities(View):
    """The entities on the main list Storage Manager or Provider page"""
    # Matches both the legacy list_grid layout and the newer miq-data-table.
    table = Table(".//div[@id='list_grid' or @class='miq-data-table']/table")


class StorageManagerDetailsEntities(View):
    """The entities on the Storage Manager or Provider details page"""
    breadcrumb = BreadCrumb()
    properties = SummaryTable('Properties')
    relationships = SummaryTable('Relationships')
    smart_management = SummaryTable('Smart Management')
    status = SummaryTable('Status')


class StorageManagerDetailsAccordion(View):
    """The accordion on the Storage Manager or Provider details page"""
    @View.nested
    class properties(Accordion): # noqa
        tree = ManageIQTree()

    @View.nested
    class relationships(Accordion): # noqa
        tree = ManageIQTree()
class StorageManagerView(BaseLoggedInPage):
    """A base view for all the Storage Manager or Provider pages"""
    title = Text('.//div[@id="center_div" or @id="main-content"]//h1')
    flash = FlashMessages(
        './/div[@id="flash_msg_div"]/div[@id="flash_text_div" or '
        'contains(@class, "flash_text_div")]')

    @property
    def in_manager(self):
        # True when logged in and the navigation path expected for this
        # manager type (block vs object storage) is currently selected.
        navigation_path = self.context['object'].navigation_path
        return(
            self.logged_in_as_current_user and
            self.navigation.currently_selected == navigation_path)


class StorageManagerAllView(StorageManagerView):
    """The all Storage Manager or Provider page"""
    @property
    def is_displayed(self):
        return (
            self.in_manager and
            self.title.text in ('Storage Managers', self.context['object'].manager_type))
    toolbar = View.nested(StorageManagerToolbar)
    entities = View.nested(StorageManagerEntities)
    paginator = PaginationPane()


class ProviderStorageManagerAllView(StorageManagerAllView):
    """The storage managers list as reached from a cloud provider."""
    @property
    def is_displayed(self):
        return (
            self.logged_in_as_current_user and
            self.navigation.currently_selected == ['Compute', 'Clouds', 'Providers'] and
            self.title.text == '{} (All Storage Managers)'.format(self.context['object'].name)
        )


class StorageManagerDetailsView(StorageManagerView):
    """The details page for Storage Manager or Provider"""
    @property
    def is_displayed(self):
        expected_title = '{} (Summary)'.format(self.context['object'].name)
        return(
            self.title.text == expected_title and
            self.entities.breadcrumb.active_location == expected_title)
    toolbar = View.nested(StorageManagerDetailsToolbar)
    sidebar = View.nested(StorageManagerDetailsAccordion)
    entities = View.nested(StorageManagerDetailsEntities)
@attr.s
class StorageManager(BaseEntity, WidgetasticTaggable, PolicyProfileAssignable):
    """ Model of a storage manager in cfme

    Args:
        collection: Instance of collection
        name: Name of the object manager.
        provider: Provider
    """
    name = attr.ib()
    provider = attr.ib()
    # Noun used in flash messages and configuration menu items.
    storage_title = 'Storage Manager'

    @property
    def navigation_path(self):
        # Delegated to the owning collection (block vs object storage).
        return self.parent.navigation_path

    @property
    def manager_type(self):
        return self.parent.manager_type

    def refresh(self, cancel=False):
        """Refresh storage manager"""
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select('Refresh Relationships and Power States',
                                               handle_alert=not cancel)
        if not cancel:
            msg = "Refresh Provider initiated for 1 {} from the CFME Database".format(
                self.storage_title)
            view.flash.assert_success_message(msg)

    def delete(self, wait=True):
        """Delete storage manager"""
        # NOTE(review): the `wait` parameter is currently unused.
        view = navigate_to(self, 'Details')
        view.toolbar.configuration.item_select(
            'Remove this {}'.format(self.storage_title), handle_alert=True)
        msg = "Delete initiated for 1 {} from the CFME Database".format(self.storage_title)
        view.flash.assert_success_message(msg)
@attr.s
class BlockManagerCollection(BaseCollection):
    """Collection object [block manager] for the :py:class:'cfme.storage.manager'"""
    ENTITY = StorageManager
    manager_type = 'Block Storage Managers'
    navigation_path = ['Storage', 'Block Storage', 'Managers']


@attr.s
class ObjectManagerCollection(BaseCollection):
    """Collection object [object manager] for the :py:class:'cfme.storage.manager'"""
    ENTITY = StorageManager
    manager_type = 'Object Storage Managers'
    navigation_path = ['Storage', 'Object Storage', 'Managers']
@navigator.register(BlockManagerCollection, 'All')
@navigator.register(ObjectManagerCollection, 'All')
class StorageManagerAll(CFMENavigateStep):
    """Navigate to the list of all storage managers of one type."""
    VIEW = StorageManagerAllView
    prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')

    def step(self, *args, **kwargs):
        self.prerequisite_view.navigation.select(*self.obj.navigation_path)


@navigator.register(StorageManager, 'Details')
class StorageManagerDetails(CFMENavigateStep):
    """Navigate from the managers list to one manager's summary page."""
    VIEW = StorageManagerDetailsView
    prerequisite = NavigateToAttribute('parent', 'All')

    def step(self, *args, **kwargs):
        try:
            # Page through the table until the row with this name appears.
            row = self.prerequisite_view.paginator.find_row_on_pages(
                self.prerequisite_view.entities.table, Name=self.obj.name)
            row.click()
        except NoSuchElementException:
            raise ItemNotFound('Could not locate {}'.format(self.obj.name))
| jkandasa/integration_tests | cfme/storage/manager.py | Python | gpl-2.0 | 6,720 |
from higgsdataset import HiggsDataset
from pylearn2.termination_criteria import EpochCounter
from pylearn2.testing.skip import skip_if_no_data
from pylearn2.config import yaml_parse

# Smoke test: load the MLP YAML template, substitute tiny hyper-parameters,
# and run a single training epoch.
with open('mlp.test.yaml', 'r') as f:
    train = f.read()
hyper_params = {'train_stop': 50,
                'valid_start':51,
                'valid_stop': 100,
                'dim_h0': 5,
                # 'n_feat': ,
                'max_epochs': 1,
                'save_path': '../junk'}
# %-interpolate the hyper-parameters into the YAML, then build the Train object.
train = train % (hyper_params)
train = yaml_parse.load(train)
train.main_loop()
| Corei13/descartes | code/unit.py | Python | mit | 558 |
#!/usr/bin/env python
import sys, os
# Make the install directory (parent of this script's directory) importable
# so that Base.dataBase can be found.
rep=os.path.dirname(os.path.abspath(__file__))
installDir=os.path.join(rep,'..')
sys.path.insert(0,installDir)
from PyQt4 import QtGui,QtCore,QtSql
from Base.dataBase import Base
def completeDatabase(fichier,table,enregistrement):
maBase=Base(fichier)
maBase.initialise()
nomTable="ma"+str(table)
matable=getattr(maBase,nomTable)
model= QtSql.QSqlTableModel()
model.setTable(matable.nom)
nbCols=model.columnCount() -1
if table == "TableGroupesRef" : nbCols==nbCols+1
if len(enregistrement) != nbCols :
print "mauvais nb de valeurs"
print "Attention, ne pas renter d'Id"
if table == "TableGroupesRef" : matable.insereLigne(enregistrement)
else : matable.insereLigneAutoId(enregistrement)
maBase.close()
if __name__ == "__main__":
    # Command-line front end: -d database file, -t target table; the
    # positional arguments are the column values of the new record.
    from optparse import OptionParser
    p=OptionParser()
    p.add_option('-d',dest='database',default="myMesh.db",help='nom de la database')
    p.add_option('-t',dest='table',help='nom de la table a completer')
    options, args = p.parse_args()
    if options.table==None :
        print "table obligatoire"
        exit()
    # Only the four known tables may be targeted.
    if options.table not in ("TableMaillages","TableMailleurs","TableGroupesRef","TableVersions") :
        print "la table doit etre : TableMaillages ou TableMailleurs ou TableGroupesRef ou TableVersions"
        exit()
    enregistrement=tuple(args)
    completeDatabase(options.database,options.table,enregistrement)
| FedoraScientific/salome-smesh | src/Tools/Verima/ajoutEnreg.py | Python | lgpl-2.1 | 1,532 |
from django.conf.urls import include, url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
from django.views.generic import RedirectView, TemplateView
from .views import CreateAccount, SelectGroup
# Auth URL configuration: login/logout use Django's built-in views with
# custom templates; account creation is a three-step flow.
urlpatterns = [
    # Bare prefix redirects to the login page.
    url(r'^$',
        RedirectView.as_view(
            pattern_name='auth:login',
            permanent=False)),
    url(r'^login/$',
        auth_views.login,
        {'template_name': 'users/login.html'},
        name='login'),
    # The logout page re-offers the login form.
    url(r'^logout/$',
        auth_views.logout,
        {'template_name': 'users/logout.html',
         'extra_context':
            {'form': AuthenticationForm}},
        name='logout'),
    url(r'^create/$',
        CreateAccount.as_view(),
        name='create'),
    # Step 2: pick a group for the freshly created account (slug in URL).
    url(r'^create/(?P<slug>[\w\d\-]+)/group/$',
        SelectGroup.as_view(),
        name='create_select_group'),
    url(r'^create/done/$',
        TemplateView.as_view(
            template_name=(
                'users/user_create_done.html')),
        name='create_done'),
]
| evonaut/bolzplatz | bolzplatz/users/urls.py | Python | gpl-2.0 | 1,057 |
def smile():
    """Return the happy-face emoticon."""
    face = ":)"
    return face
def frown():
    """Return the sad-face emoticon."""
    face = ":("
    return face
| muneeb131/test | awesome/__init__.py | Python | gpl-3.0 | 59 |
from sympy import Integer
from sympy.core.compatibility import ordered_iter
from threading import RLock
# it is sufficient to import "pyglet" here once
# NOTE(review): the bare except also masks KeyboardInterrupt/SystemExit
# raised during import.
try:
    from pyglet.gl import *
except:
    raise ImportError("pyglet is required for plotting.\n visit http://www.pyglet.org/")
from plot_object import PlotObject
from plot_axes import PlotAxes
from plot_window import PlotWindow
from plot_mode import PlotMode
import plot_modes
from time import sleep
from os import getcwd, listdir
from util import parse_option_string
from sympy.geometry.entity import GeometryEntity
class Plot(object):
    """
    Plot Examples
    =============

    See examples/plotting.py for many more examples.

    >>> from sympy import Plot
    >>> from sympy.abc import x, y, z

    >>> Plot(x*y**3-y*x**3)

    >>> p = Plot()
    >>> p[1] = x*y
    >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)

    >>> p = Plot()
    >>> p[1] = x**2+y**2
    >>> p[2] = -x**2-y**2

    Variable Intervals
    ==================

    The basic format is [var, min, max, steps], but the
    syntax is flexible and arguments left out are taken
    from the defaults for the current coordinate mode:

    >>> Plot(x**2) # implies [x,-5,5,100]
    >>> Plot(x**2, [], []) # [x,-1,1,40], [y,-1,1,40]
    >>> Plot(x**2-y**2, [100], [100]) # [x,-1,1,100], [y,-1,1,100]
    >>> Plot(x**2, [x,-13,13,100])
    >>> Plot(x**2, [-13,13]) # [x,-13,13,100]
    >>> Plot(x**2, [x,-13,13]) # [x,-13,13,100]
    >>> Plot(1*x, [], [x], mode='cylindrical')
    ... # [unbound_theta,0,2*Pi,40], [x,-1,1,20]

    Coordinate Modes
    ================

    Plot supports several curvilinear coordinate modes, and
    they are independent for each plotted function. You can specify
    a coordinate mode explicitly with the 'mode' named argument,
    but it can be automatically determined for Cartesian or
    parametric plots, and therefore must only be specified for
    polar, cylindrical, and spherical modes.

    Specifically, Plot(function arguments) and Plot[n] =
    (function arguments) will interpret your arguments as a
    Cartesian plot if you provide one function and a parametric
    plot if you provide two or three functions. Similarly, the
    arguments will be interpreted as a curve if one variable is
    used, and a surface if two are used.

    Supported mode names by number of variables:

    1: parametric, cartesian, polar
    2: parametric, cartesian, cylindrical = polar, spherical

    >>> Plot(1, mode='spherical')

    Calculator-like Interface
    =========================

    >>> p = Plot(visible=False)
    >>> f = x**2
    >>> p[1] = f
    >>> p[2] = f.diff(x)
    >>> p[3] = f.diff(x).diff(x)
    >>> p
    [1]: x**2, 'mode=cartesian'
    [2]: 2*x, 'mode=cartesian'
    [3]: 2, 'mode=cartesian'
    >>> p.show()
    >>> p.clear()
    >>> p
    <blank plot>
    >>> p[1] = x**2+y**2
    >>> p[1].style = 'solid'
    >>> p[2] = -x**2-y**2
    >>> p[2].style = 'wireframe'
    >>> p[1].color = z, (0.4,0.4,0.9), (0.9,0.4,0.4)
    >>> p[1].style = 'both'
    >>> p[2].style = 'both'
    >>> p.close()

    Plot Window Keyboard Controls
    =============================

    Screen Rotation:
        X,Y axis      Arrow Keys, A,S,D,W, Numpad 4,6,8,2
        Z axis        Q,E, Numpad 7,9

    Model Rotation:
        Z axis        Z,C, Numpad 1,3

    Zoom:             R,F, PgUp,PgDn, Numpad +,-

    Reset Camera:     X, Numpad 5

    Camera Presets:
        XY            F1
        XZ            F2
        YZ            F3
        Perspective   F4

    Sensitivity Modifier: SHIFT

    Axes Toggle:
        Visible       F5
        Colors        F6

    Close Window:     ESCAPE

    =============================
    """
    def __init__(self, *fargs, **win_args):
        """
        Positional Arguments
        ====================

        Any given positional arguments are used to
        initialize a plot function at index 1. In
        other words...

        >>> from sympy.core import Symbol
        >>> from sympy.abc import x
        >>> p = Plot(x**2, visible=False)

        ...is equivalent to...

        >>> p = Plot(visible=False)
        >>> p[1] = x**2

        Note that in earlier versions of the plotting
        module, you were able to specify multiple
        functions in the initializer. This functionality
        has been dropped in favor of better automatic
        plot mode detection.

        Named Arguments
        ===============

        axes
            An option string of the form
            "key1=value1; key2 = value2" which
            can use the following options:

            style = ordinate
                none OR frame OR box OR ordinate

            stride = 0.25
                val OR (val_x, val_y, val_z)

            overlay = True (draw on top of plot)
                True OR False

            colored = False (False uses Black,
                             True uses colors
                             R,G,B = X,Y,Z)
                True OR False

            label_axes = False (display axis names
                                at endpoints)
                True OR False

        visible = True (show immediately)
            True OR False

        The following named arguments are passed as
        arguments to window initialization:

        antialiasing = True
            True OR False

        ortho = False
            True OR False

        invert_mouse_zoom = False
            True OR False
        """
        self._win_args = win_args
        self._window = None
        # Guards _functions against concurrent access from the render thread.
        self._render_lock = RLock()
        self._functions = {}
        self._pobjects = []
        self._screenshot = ScreenShot(self)
        axe_options = parse_option_string(win_args.pop('axes', ''))
        self.axes = PlotAxes(**axe_options)
        self._pobjects.append(self.axes)
        # Any positional arguments define the function at index 0.
        self[0] = fargs
        if win_args.get('visible', True):
            self.show()
    ## Window Interfaces

    def show(self):
        """
        Creates and displays a plot window, or activates it
        (gives it focus) if it has already been created.
        """
        if self._window and not self._window.has_exit:
            self._window.activate()
        else:
            # (Re)create the window; GL resources must be rebuilt for it.
            self._win_args['visible'] = True
            self.axes.reset_resources()
            self._window = PlotWindow(self, **self._win_args)

    def close(self):
        """
        Closes the plot window.
        """
        if self._window:
            self._window.close()

    def saveimage(self, outfile=None, format='', size=(600, 500)):
        """
        Saves a screen capture of the plot window to an
        image file.

        If outfile is given, it can either be a path
        or a file object. Otherwise a png image will
        be saved to the current working directory.
        If the format is omitted, it is determined from
        the filename extension.
        """
        self._screenshot.save(outfile, format, size)
    ## Function List Interfaces

    def clear(self):
        """
        Clears the function list of this plot.
        """
        self._render_lock.acquire()
        self._functions = {}
        self.adjust_all_bounds()
        self._render_lock.release()

    def __getitem__(self, i):
        """
        Returns the function at position i in the
        function list.
        """
        return self._functions[i]

    def __setitem__(self, i, args):
        """
        Parses and adds a PlotMode to the function
        list.
        """
        if not (isinstance(i, (int, Integer)) and i >= 0):
            raise ValueError("Function index must "
                             "be an integer >= 0.")
        if isinstance(args, PlotObject):
            # Already a plottable object; use it as-is.
            f = args
        else:
            # Normalize a single expression into an argument list.
            if (not ordered_iter(args)) or isinstance(args, GeometryEntity):
                args = [args]
            if len(args) == 0:
                return # no arguments given
            kwargs = dict(bounds_callback=self.adjust_all_bounds)
            f = PlotMode(*args, **kwargs)
        if f:
            self._render_lock.acquire()
            self._functions[i] = f
            self._render_lock.release()
        else:
            raise ValueError("Failed to parse '%s'."
                             % ', '.join(str(a) for a in args))

    def __delitem__(self, i):
        """
        Removes the function in the function list at
        position i.
        """
        self._render_lock.acquire()
        del self._functions[i]
        self.adjust_all_bounds()
        self._render_lock.release()

    def firstavailableindex(self):
        """
        Returns the first unused index in the function list.
        """
        i = 0
        self._render_lock.acquire()
        while i in self._functions: i += 1
        self._render_lock.release()
        return i

    def append(self, *args):
        """
        Parses and adds a PlotMode to the function
        list at the first available index.
        """
        self.__setitem__(self.firstavailableindex(), args)
    def __len__(self):
        """
        Returns the number of functions in the function list.
        """
        return len(self._functions)

    def __iter__(self):
        """
        Allows iteration of the function list.
        """
        # Python 2 dict iterator over the stored PlotMode objects.
        return self._functions.itervalues()

    def __repr__(self):
        return str(self)

    def __str__(self):
        """
        Returns a string containing a new-line separated
        list of the functions in the function list.
        """
        s = ""
        if len(self._functions) == 0:
            s += "<blank plot>"
        else:
            self._render_lock.acquire()
            s += "\n".join(["%s[%i]: %s" % ("", i, str(self._functions[i]))
                            for i in self._functions])
            self._render_lock.release()
        return s

    def adjust_all_bounds(self):
        """Recompute the axes' bounding box from every plotted function."""
        self._render_lock.acquire()
        self.axes.reset_bounding_box()
        for f in self._functions:
            self.axes.adjust_bounds(self._functions[f].bounds)
        self._render_lock.release()

    def wait_for_calculations(self):
        """Block until every function has finished calculating its
        vertices and color vertices (background threads)."""
        sleep(0)
        self._render_lock.acquire()
        for f in self._functions:
            a = self._functions[f]._get_calculating_verts
            b = self._functions[f]._get_calculating_cverts
            while a() or b(): sleep(0)
        self._render_lock.release()
class ScreenShot:
    """Deferred screenshot helper: a capture is requested from user code
    but executed on the GL thread a few frames later (see _execute_saving)."""
    def __init__(self, plot):
        self._plot = plot
        self.screenshot_requested = False
        self.outfile = None
        self.format = ''
        # True when an invisible window was created just for the capture.
        self.invisibleMode = False
        # Frames already skipped; capture happens after three skipped frames
        # so the scene is fully drawn.
        self.flag = 0

    def __nonzero__(self):
        # Python 2 truth value: truthy while a capture is pending.
        if self.screenshot_requested:
            return 1
        return 0

    def _execute_saving(self):
        if self.flag <3:
            self.flag += 1
            return

        size_x, size_y = self._plot._window.get_size()
        size = size_x*size_y*4*sizeof(c_ubyte)
        image = create_string_buffer(size)
        glReadPixels(0,0,size_x,size_y, GL_RGBA, GL_UNSIGNED_BYTE, image)
        from PIL import Image
        im = Image.frombuffer('RGBA',(size_x,size_y),image.raw, 'raw', 'RGBA', 0, 1)
        # GL rows are bottom-up; flip to normal image orientation before saving.
        im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile, self.format)

        self.flag = 0
        self.screenshot_requested = False
        if self.invisibleMode:
            self._plot._window.close()

    def save(self, outfile=None, format='', size=(600, 500)):
        """Request a screenshot; opens an invisible window if none exists."""
        self.outfile = outfile
        self.format = format
        self.size = size
        self.screenshot_requested = True
        if not self._plot._window or self._plot._window.has_exit:
            self._plot._win_args['visible'] = False
            self._plot._win_args['width'] = size[0]
            self._plot._win_args['height'] = size[1]
            self._plot.axes.reset_resources()
            self._plot._window = PlotWindow(self._plot, **self._plot._win_args)
            self.invisibleMode = True

        if self.outfile is None:
            self.outfile=self._create_unique_path()
            print self.outfile

    def _create_unique_path(self):
        # Find the first unused plot_<i>.png name in the current directory.
        cwd = getcwd()
        l = listdir(cwd)
        path = ''
        i=0
        while True:
            if not 'plot_%s.png'%i in l:
                path = cwd+'/plot_%s.png'%i
                break
            i+=1
        return path
| minrk/sympy | sympy/plotting/plot.py | Python | bsd-3-clause | 12,383 |
# coding=utf-8
# Progetto: Pushetta API
# Indici per il motore di ricerca
from haystack import indexes
from core.models import Channel
'''
class ChannelIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = Channel
def index_queryset(self, using=None):
"""Used when the entire index for model is updated."""
# filter(kind!=PRIVATE).
return self.get_model().objects.filter(hidden=False)
'''
class ChannelIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index over public (non-hidden) channels."""
    # EdgeNgram on the document field enables prefix (search-as-you-type) matching.
    text = indexes.EdgeNgramField(document=True, use_template=True)
    name = indexes.CharField(model_attr='name')
    description = indexes.CharField(model_attr='description')
    image = indexes.CharField(model_attr='image')
    hidden = indexes.BooleanField(model_attr='hidden')
    kind = indexes.IntegerField(model_attr='kind')
    subscriptions = indexes.IntegerField(model_attr='subscriptions')

    def get_model(self):
        return Channel

    def index_queryset(self, using=None):
        """Used when the entire index for model is updated."""
        # filter(kind!=PRIVATE).
        # NOTE(review): `hidden` is a BooleanField but is filtered with the
        # string 'false' (the commented-out variant above used hidden=False);
        # confirm the string form filters as intended on this Django version.
        return self.get_model().objects.filter(hidden='false')
| guglielmino/pushetta-api-django | pushetta/core/search_indexes.py | Python | gpl-3.0 | 1,175 |
# coding=utf-8
# Copyright 2020 Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    total = int(t)
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours:
        return f"{hours}:{minutes:02d}:{seconds:02d}"
    return f"{minutes:02d}:{seconds:02d}"
# Build the HTML snippet for a single progress bar: optional prefix text,
# a native <progress> element of the given pixel width, and a label.
def html_progress_bar(value, total, prefix, label, width=300):
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    def render_cell(elt):
        # Floats get fixed 6-decimal formatting; everything else uses str().
        return f"{elt:.6f}" if isinstance(elt, float) else str(elt)
    # First row is the header, the rest are data rows.
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    html_code += "".join(f"      <th>{i}</th>\n" for i in items[0])
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        html_code += "".join(f"      <td>{render_cell(elt)}</td>\n" for elt in line)
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """
    A progress bar for display in a notebook.

    Class attributes (overridden by derived classes)

        - **warmup** (`int`) -- The number of iterations to do at the beginning while ignoring `update_every`.
        - **update_every** (`float`) -- Since calling the time takes some time, we only do it every presumed
          `update_every` seconds. The progress bar uses the average time passed up until now to guess the next value
          for which it will call the update.

    Args:
        total (`int`):
            The total number of iterations to reach.
        prefix (`str`, *optional*):
            A prefix to add before the progress bar.
        leave (`bool`, *optional*, defaults to `True`):
            Whether or not to leave the progress bar once it's completed. You can always call the
            [`~utils.notebook.NotebookProgressBar.close`] method to make the bar disappear.
        parent ([`~notebook.NotebookTrainingTracker`], *optional*):
            A parent object (like [`~utils.notebook.NotebookTrainingTracker`]) that spawns progress bars and handle
            their display. If set, the object passed must have a `display()` method.
        width (`int`, *optional*, defaults to 300):
            The width (in pixels) that the bar will take.

    Example:

    ```python
    import time

    pbar = NotebookProgressBar(100)
    for val in range(100):
        pbar.update(val)
        time.sleep(0.07)
    pbar.update(100)
    ```"""

    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        # Lazily-initialized progress state; fully set on the first update().
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        """
        The main method to update the progress bar to `value`.

        Args:
            value (`int`):
                The value to use. Must be between 0 and `total`.
            force_update (`bool`, *optional*, defaults to `False`):
                Whether or not to force and update of the internal state and display (by default, the bar will wait for
                `value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute
                since the last update to avoid adding boilerplate).
            comment (`str`, *optional*):
                A comment to add on the left of the progress bar.
        """
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            # First call: initialize timing state and draw immediately.
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            # Ignore non-advancing updates unless forced.
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            # Re-render only about every `update_every` seconds: skip ahead by
            # the number of items expected to take that long.
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        """Format the textual label for `value` and trigger a redraw.

        NOTE: the `comment` parameter is unused here; the comment shown is the
        one stored on the instance by `update`.
        """
        # Right-pad the value with spaces so the "value/total" part keeps a stable width.
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            # No timing information yet (first draw).
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            # Elapsed time is known but there is no remaining-time estimate.
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            # Full information: "elapsed < remaining" plus throughput.
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} < {format_time(self.predicted_remaining)}"
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
def display(self):
self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
self.output = disp.display(disp.HTML(self.html_code), display_id=True)
else:
self.output.update(disp.HTML(self.html_code))
def close(self):
"Closes the progress bar."
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    """
    An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.

    Args:
        num_steps (`int`):
            The number of steps during training.
        column_names (`List[str]`, *optional*):
            The list of column names for the metrics table (will be inferred from the first call to
            [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
    """
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # `inner_table[0]` holds the column names; every following entry is one row of values.
        self.inner_table = None if column_names is None else [column_names]
        # Optional child progress bar displayed below the metrics table.
        self.child_bar = None
    def display(self):
        """Render the progress bar, the metrics table and, if present, the child bar."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))
    def write_line(self, values):
        """
        Write the values in the inner table.

        Args:
            values (`Dict[str, float]`): The values to display.
        """
        if self.inner_table is None:
            # First row defines the columns.
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            # Fix: use a placeholder for columns this row does not provide instead of
            # crashing with a KeyError (e.g. a metric only logged on some rows).
            self.inner_table.append([values.get(c, "No log") for c in columns])
    def add_child(self, total, prefix=None, width=300):
        """
        Add a child progress bar displayed under the table of metrics. The child progress bar is returned (so it can be
        easily updated).

        Args:
            total (`int`): The number of iterations for the child progress bar.
            prefix (`str`, *optional*): A prefix to write on the left of the progress bar.
            width (`int`, *optional*, defaults to 300): The width (in pixels) of the progress bar.
        """
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar
    def remove_child(self):
        """
        Closes the child progress bar.
        """
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """
    A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for Jupyter Notebooks or
    Google colab.
    """
    def __init__(self):
        # Tracker drawing the training progress bar plus the metrics table.
        self.training_tracker = None
        # Child bar used while running evaluation/prediction loops.
        self.prediction_bar = None
        # When True, the next `on_step_end` forces a redraw (set after an evaluation).
        self._force_next_update = False
    def on_train_begin(self, args, state, control, **kwargs):
        """Create the training tracker with the appropriate table columns."""
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        """Advance the main bar by one step, labeling it with the current epoch."""
        # Show the epoch as an int when it is whole, otherwise with two decimals.
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False
    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        """Advance (creating on first call) the prediction child bar."""
        # Without a sized dataset we cannot know the bar's total, so skip entirely.
        if not isinstance(eval_dataloader.dataset, collections.abc.Sized):
            return
        if self.prediction_bar is None:
            # Attach under the training table when training, else use a standalone bar.
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def on_log(self, args, state, control, logs=None, **kwargs):
        """Append a row with the training loss when no evaluation will do it."""
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        """Write a metrics row after evaluation and tear down the prediction bar."""
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            # Use the most recent logged training loss, if any.
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            # Infer the metric prefix ("eval", "test", ...) from the last "*_loss" key.
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            # Drop bookkeeping/runtime entries that should not appear as table columns.
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    # "eval_rouge_l" -> "Rouge L": strip the prefix, title-case the rest.
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        """Force a final redraw and release the tracker."""
        self.training_tracker.update(
            state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
        )
        self.training_tracker = None
| huggingface/transformers | src/transformers/utils/notebook.py | Python | apache-2.0 | 14,562 |
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the QScintilla Calltips configuration page.
"""
from __future__ import unicode_literals
from PyQt5.Qsci import QsciScintilla
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_EditorCalltipsQScintillaPage import Ui_EditorCalltipsQScintillaPage
import Preferences
class EditorCalltipsQScintillaPage(ConfigurationPageBase,
                                   Ui_EditorCalltipsQScintillaPage):
    """
    Class implementing the QScintilla Calltips configuration page.
    """
    def __init__(self):
        """
        Constructor
        """
        super(EditorCalltipsQScintillaPage, self).__init__()
        self.setupUi(self)
        self.setObjectName("EditorCalltipsQScintillaPage")
        
        # Pair each QScintilla calltip style with its radio button so the
        # load and save paths share a single table.
        self.__styleButtons = [
            (QsciScintilla.CallTipsNoContext, self.ctNoContextButton),
            (QsciScintilla.CallTipsNoAutoCompletionContext,
             self.ctNoAutoCompletionButton),
            (QsciScintilla.CallTipsContext, self.ctContextButton),
        ]
        
        # set initial values
        ctContext = Preferences.getEditor("CallTipsStyle")
        for style, button in self.__styleButtons:
            if style == ctContext:
                button.setChecked(True)
    
    def save(self):
        """
        Public slot to save the EditorCalltips configuration.
        """
        for style, button in self.__styleButtons:
            if button.isChecked():
                Preferences.setEditor("CallTipsStyle", style)
                break
def create(dlg):
    """
    Module function to create the configuration page.
    
    @param dlg reference to the configuration dialog (unused here)
    @return reference to the instantiated page (ConfigurationPageBase)
    """
    return EditorCalltipsQScintillaPage()
| davy39/eric | Preferences/ConfigurationPages/EditorCalltipsQScintillaPage.py | Python | gpl-3.0 | 2,134 |
"""An OpRegularizer that applies L1 regularization on batch-norm gammas."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from morph_net.framework import generic_regularizers
from morph_net.framework import tpu_util
import tensorflow.compat.v1 as tf
class GammaL1Regularizer(generic_regularizers.OpRegularizer):
  """An OpRegularizer that L1-regularizes batch-norm gamma."""

  def __init__(self, gamma, gamma_threshold):
    """Creates an instance.

    Args:
      gamma: A tf.Tensor of shape (n_channels,) with the gammas.
      gamma_threshold: A float scalar, the threshold above which a gamma is
        considered 'alive'.
    """
    self._gamma_threshold = gamma_threshold
    self._gamma = tpu_util.maybe_convert_to_variable(gamma)
    # The per-channel L1 penalty is simply |gamma|; a channel counts as
    # alive while its |gamma| exceeds the threshold.
    magnitudes = tf.abs(self._gamma)
    self._regularization_vector = magnitudes
    self._alive_vector = magnitudes > gamma_threshold

  @property
  def regularization_vector(self):
    """Returns a tf.Tensor of shape (n_channels,) with the L1 penalties."""
    return self._regularization_vector

  @property
  def alive_vector(self):
    """Returns a tf.Tensor of shape (n_channels,) with alive bits."""
    return self._alive_vector
| google-research/morph-net | morph_net/op_regularizers/gamma_l1_regularizer.py | Python | apache-2.0 | 1,168 |
from django.conf.urls import patterns, include, url
from mainview import mainview
urlpatterns = patterns(
'',
url(r'^(?:index|index.html)?$', mainview.index),
url(r'^list/(\d+)$', mainview.list),
url(r'^show/(\d+)$',mainview.show),
url(r'.*',mainview.notfound),
) | marktrue/DjangoProTest | DjangoProTest/mainview/urls.py | Python | gpl-2.0 | 284 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.