| repo_name stringlengths 5-100 | ref stringlengths 12-67 | path stringlengths 4-244 | copies stringlengths 1-8 | content stringlengths 0-1.05M ⌀ |
|---|---|---|---|---|
saurabh6790/omnit-app | refs/heads/master | setup/doctype/quotation_lost_reason/test_quotation_lost_reason.py | 30 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
test_records = [[{"doctype":"Quotation Lost Reason", "order_lost_reason": "_Test Quotation Lost Reason"}]]
|
emesene/emesene | refs/heads/master | emesene/e3/papylib/papyon/papyon/service/AddressBook/scenario/base.py | 6 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Johann Prieur <johann.prieur@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.util.async import *
__all__ = ['BaseScenario', 'Scenario']
class BaseScenario(object):
def __init__(self, partner_scenario, callback, errback):
self._scenario = partner_scenario
self._callback = callback
self._errback = errback
def __set_scenario(self, scenario):
self._scenario = scenario
def __get_scenario(self):
return self._scenario
scenario = property(__get_scenario, __set_scenario)
def callback(self, *args):
run(self._callback, *args)
def errback(self, *args):
run(self._errback, *args)
def execute(self):
pass
def __call__(self):
return self.execute()
class Scenario(object):
"""Scenario label"""
INITIAL = "Initial"
TIMER = "Timer"
CONTACT_SAVE = "ContactSave"
GROUP_SAVE = "GroupSave"
BLOCK_UNBLOCK = "BlockUnblock"
CONTACT_MSGR_API = "ContactMsgrAPI"
MOBILE_CONTACT_MSGR_API = "MobileContactMsgrAPI"
MESSENGER_PENDING_LIST = "MessengerPendingList"
|
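As a reading aid, here is a minimal standalone sketch of the template-method pattern base.py implements: a subclass fills in execute(), and __call__ fires it, routing results through the stored callback or errback. The run() helper below is a local stand-in for papyon.util.async.run, and FetchContactsScenario is a hypothetical example, not part of papyon.

def run(callback, *args):
    # Local stand-in for papyon.util.async.run: invoke the callback if one was given.
    if callback is not None:
        callback(*args)

class SketchScenario(object):
    def __init__(self, callback, errback):
        self._callback = callback
        self._errback = errback
    def callback(self, *args):
        run(self._callback, *args)
    def errback(self, *args):
        run(self._errback, *args)
    def __call__(self):
        return self.execute()

class FetchContactsScenario(SketchScenario):
    # Hypothetical concrete scenario: fetches a contact list and reports the result.
    def __init__(self, callback, errback, address_book):
        SketchScenario.__init__(self, callback, errback)
        self._address_book = address_book
    def execute(self):
        try:
            self.callback(list(self._address_book))
        except Exception as error:
            self.errback(error)

def on_done(contacts):
    print("fetched:", contacts)

def on_error(error):
    print("failed:", error)

FetchContactsScenario(on_done, on_error, ["alice", "bob"])()  # __call__ runs execute()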
altsen/diandiyun-platform | refs/heads/master | cms/djangoapps/course_creators/admin.py | 232 |
"""
django admin page for the course creators table
"""
from course_creators.models import CourseCreator, update_creator_state, send_user_notification, send_admin_notification
from course_creators.views import update_course_creator_group
from ratelimitbackend import admin
from django.conf import settings
from django.dispatch import receiver
from edxmako.shortcuts import render_to_string
from django.core.mail import send_mail
from smtplib import SMTPException
import logging
log = logging.getLogger("studio.coursecreatoradmin")
def get_email(obj):
""" Returns the email address for a user """
return obj.user.email
get_email.short_description = 'email'
class CourseCreatorAdmin(admin.ModelAdmin):
"""
Admin for the course creator table.
"""
# Fields to display on the overview page.
list_display = ['username', get_email, 'state', 'state_changed', 'note']
readonly_fields = ['username', 'state_changed']
# Controls the order on the edit form (without this, read-only fields appear at the end).
fieldsets = (
(None, {
'fields': ['username', 'state', 'state_changed', 'note']
}),
)
# Fields that support filtering.
list_filter = ['state', 'state_changed']
# Fields that search supports.
search_fields = ['user__username', 'user__email', 'state', 'note']
# Turn off the action bar (we have no bulk actions)
actions = None
def username(self, inst):
"""
Returns the username for a given user.
Implemented to allow sorting by username rather than by the user object.
"""
return inst.user.username
username.admin_order_field = 'user__username'
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return request.user.is_staff
def save_model(self, request, obj, form, change):
# Store who is making the request.
obj.admin = request.user
obj.save()
admin.site.register(CourseCreator, CourseCreatorAdmin)
@receiver(update_creator_state, sender=CourseCreator)
def update_creator_group_callback(sender, **kwargs):
"""
Callback for when the model's creator status has changed.
"""
user = kwargs['user']
updated_state = kwargs['state']
update_course_creator_group(kwargs['caller'], user, updated_state == CourseCreator.GRANTED)
@receiver(send_user_notification, sender=CourseCreator)
def send_user_notification_callback(sender, **kwargs):
"""
Callback for notifying user about course creator status change.
"""
user = kwargs['user']
updated_state = kwargs['state']
studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')
context = {'studio_request_email': studio_request_email}
subject = render_to_string('emails/course_creator_subject.txt', context)
subject = ''.join(subject.splitlines())
if updated_state == CourseCreator.GRANTED:
message_template = 'emails/course_creator_granted.txt'
elif updated_state == CourseCreator.DENIED:
message_template = 'emails/course_creator_denied.txt'
else:
# changed to unrequested or pending
message_template = 'emails/course_creator_revoked.txt'
message = render_to_string(message_template, context)
try:
user.email_user(subject, message, studio_request_email)
except:
log.warning("Unable to send course creator status e-mail to %s", user.email)
@receiver(send_admin_notification, sender=CourseCreator)
def send_admin_notification_callback(sender, **kwargs):
"""
Callback for notifying admin of a user in the 'pending' state.
"""
user = kwargs['user']
studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')
context = {'user_name': user.username, 'user_email': user.email}
subject = render_to_string('emails/course_creator_admin_subject.txt', context)
subject = ''.join(subject.splitlines())
message = render_to_string('emails/course_creator_admin_user_pending.txt', context)
try:
send_mail(
subject,
message,
studio_request_email,
[studio_request_email],
fail_silently=False
)
except SMTPException:
log.warning("Failure sending 'pending state' e-mail for %s to %s", user.email, studio_request_email)
|
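The three @receiver callbacks above are wired to custom signals (update_creator_state, send_user_notification, send_admin_notification) defined in course_creators.models. Below is a minimal, self-contained sketch of that signal/receiver wiring with django.dispatch; the signal object and payload keys mirror the kwargs used above but are illustrative, not edx-platform's actual definitions.

from django.dispatch import Signal, receiver

# Illustrative signal; the real one is declared in course_creators.models.
update_creator_state = Signal()

@receiver(update_creator_state)
def log_creator_state_change(sender, **kwargs):
    # Same payload shape as update_creator_group_callback above.
    print("%s -> %s" % (kwargs['user'], kwargs['state']))

# Model code would emit the signal after persisting a state change:
update_creator_state.send(sender=None, caller="some_admin", user="alice", state="granted")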
ajgallegog/gem5_arm | refs/heads/master | src/mem/slicc/ast/AssignStatementAST.py | 51 |
# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class AssignStatementAST(StatementAST):
def __init__(self, slicc, lvalue, rvalue):
super(AssignStatementAST, self).__init__(slicc)
self.lvalue = lvalue
self.rvalue = rvalue
def __repr__(self):
return "[AssignStatementAST: %r := %r]" % (self.lvalue, self.rvalue)
def generate(self, code, return_type):
lcode = self.slicc.codeFormatter()
rcode = self.slicc.codeFormatter()
ltype = self.lvalue.generate(lcode)
rtype = self.rvalue.generate(rcode)
code("$lcode = $rcode;")
if not (ltype == rtype or (ltype.isInterface and ltype['interface'] == rtype.ident)):
# FIXME - beckmann
# the following if statement is a hack to allow NetDest
# objects to be assigned to Sets; this allows the
# previous NetworkMessage Destination 'Set' class to
# migrate to the new NetworkMessage Destination 'NetDest'
# class
if str(ltype) != "NetDest" and str(rtype) != "Set":
self.error("Assignment type mismatch '%s' and '%s'",
ltype, rtype)
|
jsirois/pants | refs/heads/master | src/python/pants/base/build_environment_test.py | 4 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.base.build_environment import get_pants_cachedir
from pants.util.contextutil import environment_as, temporary_file
def test_get_pants_cachedir() -> None:
with environment_as(XDG_CACHE_HOME=""):
assert os.path.expanduser("~/.cache/pants") == get_pants_cachedir()
with temporary_file() as temp, environment_as(XDG_CACHE_HOME=temp.name):
assert os.path.join(temp.name, "pants") == get_pants_cachedir()
|
iidx/volatility | refs/heads/master | volatility/plugins/registry/hivelist.py | 44 |
# Volatility
# Copyright (C) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <bdolangavitt@wesleyan.edu>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
#pylint: disable-msg=C0111
import volatility.plugins.registry.hivescan as hs
import volatility.obj as obj
import volatility.utils as utils
import volatility.cache as cache
class HiveList(hs.HiveScan):
"""Print list of registry hives.
You can supply the offset of a specific hive. Otherwise
this module will use the results from hivescan automatically.
"""
# Declare meta information associated with this plugin
meta_info = {}
meta_info['author'] = 'Brendan Dolan-Gavitt'
meta_info['copyright'] = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt'
meta_info['contact'] = 'bdolangavitt@wesleyan.edu'
meta_info['license'] = 'GNU General Public License 2.0'
meta_info['url'] = 'http://moyix.blogspot.com/'
meta_info['os'] = 'WIN_32_XP_SP2'
meta_info['version'] = '1.0'
def render_text(self, outfd, result):
self.table_header(outfd, [('Virtual', '[addrpad]'),
('Physical', '[addrpad]'),
('Name', ''),
])
hive_offsets = []
for hive in result:
if hive.Hive.Signature == 0xbee0bee0 and hive.obj_offset not in hive_offsets:
try:
name = str(hive.FileFullPath or '') or str(hive.FileUserName or '') or str(hive.HiveRootPath or '') or "[no name]"
except AttributeError:
name = "[no name]"
# Width spec of 10 rather than 8, since the '#' flag puts 0x at the start, which is included in the width
self.table_row(outfd, hive.obj_offset, hive.obj_vm.vtop(hive.obj_offset), name)
hive_offsets.append(hive.obj_offset)
@cache.CacheDecorator("tests/hivelist")
def calculate(self):
flat = utils.load_as(self._config, astype = 'physical')
addr_space = utils.load_as(self._config)
hives = hs.HiveScan.calculate(self)
## The first hive is normally given in physical address space
## - so we instantiate it using the flat address space. We
## then read the Flink of the list to locate the address of
## the first hive in virtual address space. hmm I wish we
## could go from physical to virtual memory easier.
for offset in hives:
hive = obj.Object("_CMHIVE", int(offset), flat, native_vm = addr_space)
if hive.HiveList.Flink.v():
start_hive_offset = hive.HiveList.Flink.v() - addr_space.profile.get_obj_offset('_CMHIVE', 'HiveList')
## Now instantiate the first hive in virtual address space as normal
start_hive = obj.Object("_CMHIVE", start_hive_offset, addr_space)
for hive in start_hive.HiveList:
yield hive
|
chinmaygarde/depot_tools | refs/heads/master | third_party/logilab/common/testlib.py | 64 |
# -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Run tests.
This will find all modules whose name match a given prefix in the test
directory, and run them. Various command line options provide
additional facilities.
Command line options:
-v verbose -- run tests in verbose mode with output to stdout
-q quiet -- don't print anything except if a test fails
-t testdir -- directory where the tests will be found
-x exclude -- add a test to exclude
-p profile -- profiled execution
-d dbc -- enable design-by-contract
-m match -- only run tests matching the tag pattern which follows
If no non-option arguments are present, prefixes used are 'test',
'regrtest', 'smoketest' and 'unittest'.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
# modified copy of some functions from test/regrtest.py from PyXml
# disable camel case warning
# pylint: disable=C0103
import sys
import os, os.path as osp
import re
import traceback
import inspect
import difflib
import tempfile
import math
import warnings
from shutil import rmtree
from operator import itemgetter
from itertools import dropwhile
from inspect import isgeneratorfunction
from six import string_types
from six.moves import builtins, range, configparser, input
from logilab.common.deprecation import deprecated
import unittest as unittest_legacy
if not getattr(unittest_legacy, "__package__", None):
try:
import unittest2 as unittest
from unittest2 import SkipTest
except ImportError:
raise ImportError("You have to install python-unittest2 to use %s" % __name__)
else:
import unittest
from unittest import SkipTest
from functools import wraps
from logilab.common.debugger import Debugger, colorize_source
from logilab.common.decorators import cached, classproperty
from logilab.common import textutils
__all__ = ['main', 'unittest_main', 'find_tests', 'run_test', 'spawn']
DEFAULT_PREFIXES = ('test', 'regrtest', 'smoketest', 'unittest',
'func', 'validation')
is_generator = deprecated('[lgc 0.63] use inspect.isgeneratorfunction')(isgeneratorfunction)
# used by unittest to count the number of relevant levels in the traceback
__unittest = 1
def with_tempdir(callable):
"""A decorator ensuring no temporary file left when the function return
Work only for temporary file create with the tempfile module"""
if isgeneratorfunction(callable):
def proxy(*args, **kwargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
for x in callable(*args, **kwargs):
yield x
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
@wraps(callable)
def proxy(*args, **kargs):
old_tmpdir = tempfile.gettempdir()
new_tmpdir = tempfile.mkdtemp(prefix="temp-lgc-")
tempfile.tempdir = new_tmpdir
try:
return callable(*args, **kargs)
finally:
try:
rmtree(new_tmpdir, ignore_errors=True)
finally:
tempfile.tempdir = old_tmpdir
return proxy
def in_tempdir(callable):
"""A decorator moving the enclosed function inside the tempfile.tempfdir
"""
@wraps(callable)
def proxy(*args, **kargs):
old_cwd = os.getcwd()
os.chdir(tempfile.tempdir)
try:
return callable(*args, **kargs)
finally:
os.chdir(old_cwd)
return proxy
def within_tempdir(callable):
"""A decorator run the enclosed function inside a tmpdir removed after execution
"""
proxy = with_tempdir(in_tempdir(callable))
proxy.__name__ = callable.__name__
return proxy
def find_tests(testdir,
prefixes=DEFAULT_PREFIXES, suffix=".py",
excludes=(),
remove_suffix=True):
"""
Return a list of all applicable test modules.
"""
tests = []
for name in os.listdir(testdir):
if not suffix or name.endswith(suffix):
for prefix in prefixes:
if name.startswith(prefix):
if remove_suffix and name.endswith(suffix):
name = name[:-len(suffix)]
if name not in excludes:
tests.append(name)
tests.sort()
return tests
## PostMortem Debug facilities #####
def start_interactive_mode(result):
"""starts an interactive shell so that the user can inspect errors
"""
debuggers = result.debuggers
descrs = result.error_descrs + result.fail_descrs
if len(debuggers) == 1:
# don't ask for test name if there's only one failure
debuggers[0].start()
else:
while True:
testindex = 0
print("Choose a test to debug:")
# order debuggers in the same way as the errors were printed
print("\n".join(['\t%s : %s' % (i, descr) for i, (_, descr)
in enumerate(descrs)]))
print("Type 'exit' (or ^D) to quit")
print()
try:
todebug = input('Enter a test name: ')
if todebug.strip().lower() == 'exit':
print()
break
else:
try:
testindex = int(todebug)
debugger = debuggers[descrs[testindex][0]]
except (ValueError, IndexError):
print("ERROR: invalid test number %r" % (todebug, ))
else:
debugger.start()
except (EOFError, KeyboardInterrupt):
print()
break
# test utils ##################################################################
class SkipAwareTestResult(unittest._TextTestResult):
def __init__(self, stream, descriptions, verbosity,
exitfirst=False, pdbmode=False, cvg=None, colorize=False):
super(SkipAwareTestResult, self).__init__(stream,
descriptions, verbosity)
self.skipped = []
self.debuggers = []
self.fail_descrs = []
self.error_descrs = []
self.exitfirst = exitfirst
self.pdbmode = pdbmode
self.cvg = cvg
self.colorize = colorize
self.pdbclass = Debugger
self.verbose = verbosity > 1
def descrs_for(self, flavour):
return getattr(self, '%s_descrs' % flavour.lower())
def _create_pdb(self, test_descr, flavour):
self.descrs_for(flavour).append( (len(self.debuggers), test_descr) )
if self.pdbmode:
self.debuggers.append(self.pdbclass(sys.exc_info()[2]))
def _iter_valid_frames(self, frames):
"""only consider non-testlib frames when formatting traceback"""
lgc_testlib = osp.abspath(__file__)
std_testlib = osp.abspath(unittest.__file__)
invalid = lambda fi: osp.abspath(fi[1]) in (lgc_testlib, std_testlib)
for frameinfo in dropwhile(invalid, frames):
yield frameinfo
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string.
This method is overridden here because we want to colorize
lines if --color is passed, and display local variables if
--verbose is passed
"""
exctype, exc, tb = err
output = ['Traceback (most recent call last)']
frames = inspect.getinnerframes(tb)
colorize = self.colorize
frames = enumerate(self._iter_valid_frames(frames))
for index, (frame, filename, lineno, funcname, ctx, ctxindex) in frames:
filename = osp.abspath(filename)
if ctx is None: # pyc files or C extensions for instance
source = '<no source available>'
else:
source = ''.join(ctx)
if colorize:
filename = textutils.colorize_ansi(filename, 'magenta')
source = colorize_source(source)
output.append(' File "%s", line %s, in %s' % (filename, lineno, funcname))
output.append(' %s' % source.strip())
if self.verbose:
output.append('%r == %r' % (dir(frame), test.__module__))
output.append('')
output.append(' ' + ' local variables '.center(66, '-'))
for varname, value in sorted(frame.f_locals.items()):
output.append(' %s: %r' % (varname, value))
if varname == 'self': # special handy processing for self
for varname, value in sorted(vars(value).items()):
output.append(' self.%s: %r' % (varname, value))
output.append(' ' + '-' * 66)
output.append('')
output.append(''.join(traceback.format_exception_only(exctype, exc)))
return '\n'.join(output)
def addError(self, test, err):
"""err -> (exc_type, exc, tcbk)"""
exc_type, exc, _ = err
if isinstance(exc, SkipTest):
assert exc_type == SkipTest
self.addSkip(test, exc)
else:
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addError(test, err)
self._create_pdb(descr, 'error')
def addFailure(self, test, err):
if self.exitfirst:
self.shouldStop = True
descr = self.getDescription(test)
super(SkipAwareTestResult, self).addFailure(test, err)
self._create_pdb(descr, 'fail')
def addSkip(self, test, reason):
self.skipped.append((test, reason))
if self.showAll:
self.stream.writeln("SKIPPED")
elif self.dots:
self.stream.write('S')
def printErrors(self):
super(SkipAwareTestResult, self).printErrors()
self.printSkippedList()
def printSkippedList(self):
# format (test, err) compatible with unittest2
for test, err in self.skipped:
descr = self.getDescription(test)
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % ('SKIPPED', descr))
self.stream.writeln("\t%s" % err)
def printErrorList(self, flavour, errors):
for (_, descr), (test, err) in zip(self.descrs_for(flavour), errors):
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, descr))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln('no stdout'.center(len(self.separator2)))
self.stream.writeln('no stderr'.center(len(self.separator2)))
# Add deprecation warnings about new api used by module level fixtures in unittest2
# http://www.voidspace.org.uk/python/articles/unittest2.shtml#setupmodule-and-teardownmodule
class _DebugResult(object): # simplify import statement among unittest flavors..
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
from logilab.common.decorators import monkeypatch
@monkeypatch(unittest.TestSuite)
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'teardown_module'):
warnings.warn('Please rename teardown_module() to tearDownModule() instead.',
DeprecationWarning)
setattr(module, 'tearDownModule', module.teardown_module)
# end of monkey-patching
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
@monkeypatch(unittest.TestSuite)
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
# add testlib specific deprecation warning and switch to new api
if hasattr(module, 'setup_module'):
warnings.warn('Please rename setup_module() to setUpModule() instead.',
DeprecationWarning)
setattr(module, 'setUpModule', module.setup_module)
# end of monkey-patching
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
# backward compatibility: TestSuite might be imported from lgc.testlib
TestSuite = unittest.TestSuite
class keywords(dict):
"""Keyword args (**kwargs) support for generative tests."""
class starargs(tuple):
"""Variable arguments (*args) for generative tests."""
def __new__(cls, *args):
return tuple.__new__(cls, args)
unittest_main = unittest.main
class InnerTestSkipped(SkipTest):
"""raised when a test is skipped"""
pass
def parse_generative_args(params):
args = []
varargs = ()
kwargs = {}
flags = 0 # 2 <=> starargs, 4 <=> kwargs
for param in params:
if isinstance(param, starargs):
varargs = param
if flags:
raise TypeError('found starargs after keywords !')
flags |= 2
args += list(varargs)
elif isinstance(param, keywords):
kwargs = param
if flags & 4:
raise TypeError('got multiple keywords parameters')
flags |= 4
elif flags & 2 or flags & 4:
raise TypeError('found parameters after kwargs or args')
else:
args.append(param)
return args, kwargs
class InnerTest(tuple):
def __new__(cls, name, *data):
instance = tuple.__new__(cls, data)
instance.name = name
return instance
class Tags(set):
"""A set of tag able validate an expression"""
def __init__(self, *tags, **kwargs):
self.inherit = kwargs.pop('inherit', True)
if kwargs:
raise TypeError("%s are an invalid keyword argument for this function" % kwargs.keys())
if len(tags) == 1 and not isinstance(tags[0], string_types):
tags = tags[0]
super(Tags, self).__init__(tags, **kwargs)
def __getitem__(self, key):
return key in self
def match(self, exp):
return eval(exp, {}, self)
# duplicate definition from unittest2 of the _deprecate decorator
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
('Please use %s instead.' % original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
class TestCase(unittest.TestCase):
"""A unittest.TestCase extension with some additional methods."""
maxDiff = None
pdbclass = Debugger
tags = Tags()
def __init__(self, methodName='runTest'):
super(TestCase, self).__init__(methodName)
self.__exc_info = sys.exc_info
self.__testMethodName = self._testMethodName
self._current_test_descr = None
self._options_ = None
@classproperty
@cached
def datadir(cls): # pylint: disable=E0213
"""helper attribute holding the standard test's data directory
NOTE: this is a logilab's standard
"""
mod = __import__(cls.__module__)
return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')
# cache it (use a class method to cache on class since TestCase is
# instantiated for each test run)
@classmethod
def datapath(cls, *fname):
"""joins the object's datadir and `fname`"""
return osp.join(cls.datadir, *fname)
def set_description(self, descr):
"""sets the current test's description.
This can be useful for generative tests because it allows specifying
a description per yield
"""
self._current_test_descr = descr
# override default's unittest.py feature
def shortDescription(self):
"""override default unittest shortDescription to handle correctly
generative tests
"""
if self._current_test_descr is not None:
return self._current_test_descr
return super(TestCase, self).shortDescription()
def quiet_run(self, result, func, *args, **kwargs):
try:
func(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except unittest.SkipTest as e:
if hasattr(result, 'addSkip'):
result.addSkip(self, str(e))
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
return False
except:
result.addError(self, self.__exc_info())
return False
return True
def _get_test_method(self):
"""return the test method"""
return getattr(self, self._testMethodName)
def optval(self, option, default=None):
"""return the option value or default if the option is not define"""
return getattr(self._options_, option, default)
def __call__(self, result=None, runcondition=None, options=None):
"""rewrite TestCase.__call__ to support generative tests
This is mostly a copy/paste from unittest.py (i.e same
variable names, same logic, except for the generative tests part)
"""
from logilab.common.pytest import FILE_RESTART
if result is None:
result = self.defaultTestResult()
result.pdbclass = self.pdbclass
self._options_ = options
# if result.cvg:
# result.cvg.start()
testMethod = self._get_test_method()
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
if runcondition and not runcondition(testMethod):
return # test is skipped
result.startTest(self)
try:
if not self.quiet_run(result, self.setUp):
return
generative = isgeneratorfunction(testMethod)
# generative tests
if generative:
self._proceed_generative(result, testMethod,
runcondition)
else:
status = self._proceed(result, testMethod)
success = (status == 0)
if not self.quiet_run(result, self.tearDown):
return
if not generative and success:
if hasattr(options, "exitfirst") and options.exitfirst:
# add this test to restart file
try:
restartfile = open(FILE_RESTART, 'a')
try:
descr = '.'.join((self.__class__.__module__,
self.__class__.__name__,
self._testMethodName))
restartfile.write(descr+os.linesep)
finally:
restartfile.close()
except Exception:
print("Error while saving succeeded test into",
osp.join(os.getcwd(), FILE_RESTART),
file=sys.__stderr__)
raise
result.addSuccess(self)
finally:
# if result.cvg:
# result.cvg.stop()
result.stopTest(self)
def _proceed_generative(self, result, testfunc, runcondition=None):
# cancel startTest()'s increment
result.testsRun -= 1
success = True
try:
for params in testfunc():
if runcondition and not runcondition(testfunc,
skipgenerator=False):
if not (isinstance(params, InnerTest)
and runcondition(params)):
continue
if not isinstance(params, (tuple, list)):
params = (params, )
func = params[0]
args, kwargs = parse_generative_args(params[1:])
# increment test counter manually
result.testsRun += 1
status = self._proceed(result, func, args, kwargs)
if status == 0:
result.addSuccess(self)
success = True
else:
success = False
# XXX Don't stop anymore if an error occured
#if status == 2:
# result.shouldStop = True
if result.shouldStop: # either on error or on exitfirst + error
break
except:
# if an error occurs between two yield
result.addError(self, self.__exc_info())
success = False
return success
def _proceed(self, result, testfunc, args=(), kwargs=None):
"""proceed the actual test
returns 0 on success, 1 on failure, 2 on error
Note: addSuccess can't be called here because we have to wait
for tearDown to be successfully executed to declare the test as
successful
"""
kwargs = kwargs or {}
try:
testfunc(*args, **kwargs)
except self.failureException:
result.addFailure(self, self.__exc_info())
return 1
except KeyboardInterrupt:
raise
except InnerTestSkipped as e:
result.addSkip(self, e)
return 1
except SkipTest as e:
result.addSkip(self, e)
return 0
except:
result.addError(self, self.__exc_info())
return 2
return 0
def defaultTestResult(self):
"""return a new instance of the defaultTestResult"""
return SkipAwareTestResult()
skip = _deprecate(unittest.TestCase.skipTest)
assertEquals = _deprecate(unittest.TestCase.assertEqual)
assertNotEquals = _deprecate(unittest.TestCase.assertNotEqual)
assertAlmostEquals = _deprecate(unittest.TestCase.assertAlmostEqual)
assertNotAlmostEquals = _deprecate(unittest.TestCase.assertNotAlmostEqual)
def innerSkip(self, msg=None):
"""mark a generative test as skipped for the <msg> reason"""
msg = msg or 'test was skipped'
raise InnerTestSkipped(msg)
@deprecated('Please use assertDictEqual instead.')
def assertDictEquals(self, dict1, dict2, msg=None, context=None):
"""compares two dicts
If the two dicts differ, the first difference is shown in the error
message
:param dict1: a Python Dictionary
:param dict2: a Python Dictionary
:param msg: custom message (String) in case of failure
"""
dict1 = dict(dict1)
msgs = []
for key, value in dict2.items():
try:
if dict1[key] != value:
msgs.append('%r != %r for key %r' % (dict1[key], value,
key))
del dict1[key]
except KeyError:
msgs.append('missing %r key' % key)
if dict1:
msgs.append('dict2 is lacking %r' % dict1)
if msg:
self.failureException(msg)
elif msgs:
if context is not None:
base = '%s\n' % context
else:
base = ''
self.fail(base + '\n'.join(msgs))
@deprecated('Please use assertCountEqual instead.')
def assertUnorderedIterableEquals(self, got, expected, msg=None):
"""compares two iterable and shows difference between both
:param got: the unordered Iterable that we found
:param expected: the expected unordered Iterable
:param msg: custom message (String) in case of failure
"""
got, expected = list(got), list(expected)
self.assertSetEqual(set(got), set(expected), msg)
if len(got) != len(expected):
if msg is None:
msg = ['Iterable have the same elements but not the same number',
'\t<element>\t<expected>i\t<got>']
got_count = {}
expected_count = {}
for element in got:
got_count[element] = got_count.get(element, 0) + 1
for element in expected:
expected_count[element] = expected_count.get(element, 0) + 1
# we know that got_count.key() == expected_count.key()
# because of assertSetEqual
for element, count in got_count.iteritems():
other_count = expected_count[element]
if other_count != count:
msg.append('\t%s\t%s\t%s' % (element, other_count, count))
self.fail(msg)
assertUnorderedIterableEqual = assertUnorderedIterableEquals
assertUnordIterEquals = assertUnordIterEqual = assertUnorderedIterableEqual
@deprecated('Please use assertSetEqual instead.')
def assertSetEquals(self,got,expected, msg=None):
"""compares two sets and shows difference between both
Don't use it for iterables other than sets.
:param got: the Set that we found
:param expected: the second Set to be compared to the first one
:param msg: custom message (String) in case of failure
"""
if not(isinstance(got, set) and isinstance(expected, set)):
warnings.warn("the assertSetEquals function if now intended for set only."\
"use assertUnorderedIterableEquals instead.",
DeprecationWarning, 2)
return self.assertUnorderedIterableEquals(got, expected, msg)
items={}
items['missing'] = expected - got
items['unexpected'] = got - expected
if any(items.itervalues()):
if msg is None:
msg = '\n'.join('%s:\n\t%s' % (key, "\n\t".join(str(value) for value in values))
for key, values in items.iteritems() if values)
self.fail(msg)
@deprecated('Please use assertListEqual instead.')
def assertListEquals(self, list_1, list_2, msg=None):
"""compares two lists
If the two lists differ, the first difference is shown in the error
message
:param list_1: a Python List
:param list_2: a second Python List
:param msg: custom message (String) in case of failure
"""
_l1 = list_1[:]
for i, value in enumerate(list_2):
try:
if _l1[0] != value:
from pprint import pprint
pprint(list_1)
pprint(list_2)
self.fail('%r != %r for index %d' % (_l1[0], value, i))
del _l1[0]
except IndexError:
if msg is None:
msg = 'list_1 has only %d elements, not %s '\
'(at least %r missing)'% (i, len(list_2), value)
self.fail(msg)
if _l1:
if msg is None:
msg = 'list_2 is lacking %r' % _l1
self.fail(msg)
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertLinesEquals(self, string1, string2, msg=None, striplines=False):
"""compare two strings and assert that the text lines of the strings
are equal.
:param string1: a String
:param string2: a String
:param msg: custom message (String) in case of failure
:param striplines: Boolean to trigger line stripping before comparing
"""
lines1 = string1.splitlines()
lines2 = string2.splitlines()
if striplines:
lines1 = [l.strip() for l in lines1]
lines2 = [l.strip() for l in lines2]
self.assertListEqual(lines1, lines2, msg)
assertLineEqual = assertLinesEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLWellFormed(self, stream, msg=None, context=2):
"""asserts the XML stream is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import parse
self._assertETXMLWellFormed(stream, parse, msg)
except ImportError:
from xml.sax import make_parser, SAXParseException
parser = make_parser()
try:
parser.parse(stream)
except SAXParseException as ex:
if msg is None:
stream.seek(0)
for _ in range(ex.getLineNumber()):
line = stream.readline()
pointer = ('' * (ex.getLineNumber() - 1)) + '^'
msg = 'XML stream not well formed: %s\n%s%s' % (ex, line, pointer)
self.fail(msg)
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLStringWellFormed(self, xml_string, msg=None, context=2):
"""asserts the XML string is well-formed (no DTD conformance check)
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
try:
from xml.etree.ElementTree import fromstring
except ImportError:
from elementtree.ElementTree import fromstring
self._assertETXMLWellFormed(xml_string, fromstring, msg)
def _assertETXMLWellFormed(self, data, parse, msg=None, context=2):
"""internal function used by /assertXML(String)?WellFormed/ functions
:param data: xml_data
:param parse: appropriate parser function for this data
:param msg: error message
:param context: number of context lines in standard message
(show all data if negative).
Only available with element tree
"""
from xml.parsers.expat import ExpatError
try:
from xml.etree.ElementTree import ParseError
except ImportError:
# compatibility for <python2.7
ParseError = ExpatError
try:
parse(data)
except (ExpatError, ParseError) as ex:
if msg is None:
if hasattr(data, 'readlines'): #file like object
data.seek(0)
lines = data.readlines()
else:
lines = data.splitlines(True)
nb_lines = len(lines)
context_lines = []
# catch when ParseError doesn't set valid lineno
if ex.lineno is not None:
if context < 0:
start = 1
end = nb_lines
else:
start = max(ex.lineno-context, 1)
end = min(ex.lineno+context, nb_lines)
line_number_length = len('%i' % end)
line_pattern = " %%%ii: %%s" % line_number_length
for line_no in range(start, ex.lineno):
context_lines.append(line_pattern % (line_no, lines[line_no-1]))
context_lines.append(line_pattern % (ex.lineno, lines[ex.lineno-1]))
context_lines.append('%s^\n' % (' ' * (1 + line_number_length + 2 +ex.offset)))
for line_no in range(ex.lineno+1, end+1):
context_lines.append(line_pattern % (line_no, lines[line_no-1]))
rich_context = ''.join(context_lines)
msg = 'XML stream not well formed: %s\n%s' % (ex, rich_context)
self.fail(msg)
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertXMLEqualsTuple(self, element, tup):
"""compare an ElementTree Element to a tuple formatted as follow:
(tagname, [attrib[, children[, text[, tail]]]])"""
# check tag
self.assertTextEquals(element.tag, tup[0])
# check attrib
if len(element.attrib) or len(tup)>1:
if len(tup)<=1:
self.fail( "tuple %s has no attributes (%s expected)"%(tup,
dict(element.attrib)))
self.assertDictEqual(element.attrib, tup[1])
# check children
if len(element) or len(tup)>2:
if len(tup)<=2:
self.fail( "tuple %s has no children (%i expected)"%(tup,
len(element)))
if len(element) != len(tup[2]):
self.fail( "tuple %s has %i children%s (%i expected)"%(tup,
len(tup[2]),
('', 's')[len(tup[2])>1], len(element)))
for index in range(len(tup[2])):
self.assertXMLEqualsTuple(element[index], tup[2][index])
#check text
if element.text or len(tup)>3:
if len(tup)<=3:
self.fail( "tuple %s has no text value (%r expected)"%(tup,
element.text))
self.assertTextEquals(element.text, tup[3])
#check tail
if element.tail or len(tup)>4:
if len(tup)<=4:
self.fail( "tuple %s has no tail value (%r expected)"%(tup,
element.tail))
self.assertTextEquals(element.tail, tup[4])
def _difftext(self, lines1, lines2, junk=None, msg_prefix='Texts differ'):
junk = junk or (' ', '\t')
# result is a generator
result = difflib.ndiff(lines1, lines2, charjunk=lambda x: x in junk)
read = []
for line in result:
read.append(line)
# lines that don't start with a ' ' are diff ones
if not line.startswith(' '):
self.fail('\n'.join(['%s\n'%msg_prefix]+read + list(result)))
@deprecated('Non-standard. Please use assertMultiLineEqual instead.')
def assertTextEquals(self, text1, text2, junk=None,
msg_prefix='Text differ', striplines=False):
"""compare two multiline strings (using difflib and splitlines())
:param text1: a Python BaseString
:param text2: a second Python Basestring
:param junk: List of characters
:param msg_prefix: String (message prefix)
:param striplines: Boolean to trigger line stripping before comparing
"""
msg = []
if not isinstance(text1, string_types):
msg.append('text1 is not a string (%s)'%(type(text1)))
if not isinstance(text2, string_types):
msg.append('text2 is not a string (%s)'%(type(text2)))
if msg:
self.fail('\n'.join(msg))
lines1 = text1.strip().splitlines(True)
lines2 = text2.strip().splitlines(True)
if striplines:
lines1 = [line.strip() for line in lines1]
lines2 = [line.strip() for line in lines2]
self._difftext(lines1, lines2, junk, msg_prefix)
assertTextEqual = assertTextEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertStreamEquals(self, stream1, stream2, junk=None,
msg_prefix='Stream differ'):
"""compare two streams (using difflib and readlines())"""
# if stream1 is stream2, readlines() on stream1 will also read lines
# in stream2, so they'll appear different, although they're not
if stream1 is stream2:
return
# make sure we compare from the beginning of the stream
stream1.seek(0)
stream2.seek(0)
# compare
self._difftext(stream1.readlines(), stream2.readlines(), junk,
msg_prefix)
assertStreamEqual = assertStreamEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertFileEquals(self, fname1, fname2, junk=(' ', '\t')):
"""compares two files using difflib"""
self.assertStreamEqual(open(fname1), open(fname2), junk,
msg_prefix='Files differs\n-:%s\n+:%s\n'%(fname1, fname2))
assertFileEqual = assertFileEquals
@deprecated('Non-standard: please copy test method to your TestCase class')
def assertDirEquals(self, path_a, path_b):
"""compares two files using difflib"""
assert osp.exists(path_a), "%s doesn't exist" % path_a
assert osp.exists(path_b), "%s doesn't exist" % path_b
all_a = [ (ipath[len(path_a):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_a)]
all_a.sort(key=itemgetter(0))
all_b = [ (ipath[len(path_b):].lstrip('/'), idirs, ifiles)
for ipath, idirs, ifiles in os.walk(path_b)]
all_b.sort(key=itemgetter(0))
iter_a, iter_b = iter(all_a), iter(all_b)
partial_iter = True
ipath_a, idirs_a, ifiles_a = data_a = None, None, None
while True:
try:
ipath_a, idirs_a, ifiles_a = datas_a = next(iter_a)
partial_iter = False
ipath_b, idirs_b, ifiles_b = datas_b = next(iter_b)
partial_iter = True
self.assertTrue(ipath_a == ipath_b,
"unexpected %s in %s while looking %s from %s" %
(ipath_a, path_a, ipath_b, path_b))
errors = {}
sdirs_a = set(idirs_a)
sdirs_b = set(idirs_b)
errors["unexpected directories"] = sdirs_a - sdirs_b
errors["missing directories"] = sdirs_b - sdirs_a
sfiles_a = set(ifiles_a)
sfiles_b = set(ifiles_b)
errors["unexpected files"] = sfiles_a - sfiles_b
errors["missing files"] = sfiles_b - sfiles_a
msgs = [ "%s: %s"% (name, items)
for name, items in errors.items() if items]
if msgs:
msgs.insert(0, "%s and %s differ :" % (
osp.join(path_a, ipath_a),
osp.join(path_b, ipath_b),
))
self.fail("\n".join(msgs))
for files in (ifiles_a, ifiles_b):
files.sort()
for index, path in enumerate(ifiles_a):
self.assertFileEquals(osp.join(path_a, ipath_a, path),
osp.join(path_b, ipath_b, ifiles_b[index]))
except StopIteration:
break
assertDirEqual = assertDirEquals
def assertIsInstance(self, obj, klass, msg=None, strict=False):
"""check if an object is an instance of a class
:param obj: the Python Object to be checked
:param klass: the target class
:param msg: a String for a custom message
:param strict: if True, check that the class of <obj> is <klass>;
else check with 'isinstance'
"""
if strict:
warnings.warn('[API] Non-standard. Strict parameter has vanished',
DeprecationWarning, stacklevel=2)
if msg is None:
if strict:
msg = '%r is not of class %s but of %s'
else:
msg = '%r is not an instance of %s but of %s'
msg = msg % (obj, klass, type(obj))
if strict:
self.assertTrue(obj.__class__ is klass, msg)
else:
self.assertTrue(isinstance(obj, klass), msg)
@deprecated('Please use assertIsNone instead.')
def assertNone(self, obj, msg=None):
"""assert obj is None
:param obj: Python Object to be tested
"""
if msg is None:
msg = "reference to %r when None expected"%(obj,)
self.assertTrue( obj is None, msg )
@deprecated('Please use assertIsNotNone instead.')
def assertNotNone(self, obj, msg=None):
"""assert obj is not None"""
if msg is None:
msg = "unexpected reference to None"
self.assertTrue( obj is not None, msg )
@deprecated('Non-standard. Please use assertAlmostEqual instead.')
def assertFloatAlmostEquals(self, obj, other, prec=1e-5,
relative=False, msg=None):
"""compares if two floats have a distance smaller than expected
precision.
:param obj: a Float
:param other: another Float to be compared to <obj>
:param prec: a Float describing the precision
:param relative: boolean switching to relative/absolute precision
:param msg: a String for a custom message
"""
if msg is None:
msg = "%r != %r" % (obj, other)
if relative:
prec = prec*math.fabs(obj)
self.assertTrue(math.fabs(obj - other) < prec, msg)
def failUnlessRaises(self, excClass, callableObj=None, *args, **kwargs):
"""override default failUnlessRaises method to return the raised
exception instance.
Fail unless an exception of class excClass is thrown
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
thrown, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
CAUTION! There are subtle differences between Logilab and unittest2
- exc is not returned in standard version
- context capabilities in standard version
- try/except/else construction (minor)
:param excClass: the Exception to be raised
:param callableObj: a callable Object which should raise <excClass>
:param args: a List of arguments for <callableObj>
:param kwargs: a List of keyword arguments for <callableObj>
"""
# XXX cube vcslib : test_branches_from_app
if callableObj is None:
_assert = super(TestCase, self).assertRaises
return _assert(excClass, callableObj, *args, **kwargs)
try:
callableObj(*args, **kwargs)
except excClass as exc:
class ProxyException:
def __init__(self, obj):
self._obj = obj
def __getattr__(self, attr):
warn_msg = ("This exception was retrieved with the old testlib way "
"`exc = self.assertRaises(Exc, callable)`, please use "
"the context manager instead'")
warnings.warn(warn_msg, DeprecationWarning, 2)
return self._obj.__getattribute__(attr)
return ProxyException(exc)
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise self.failureException("%s not raised" % excName)
assertRaises = failUnlessRaises
if sys.version_info >= (3,2):
assertItemsEqual = unittest.TestCase.assertCountEqual
else:
assertCountEqual = unittest.TestCase.assertItemsEqual
if sys.version_info < (2,7):
def assertIsNotNone(self, value, *args, **kwargs):
self.assertNotEqual(None, value, *args, **kwargs)
TestCase.assertItemsEqual = deprecated('assertItemsEqual is deprecated, use assertCountEqual')(
TestCase.assertItemsEqual)
import doctest
class SkippedSuite(unittest.TestSuite):
def test(self):
"""just there to trigger test execution"""
self.skipped_test('doctest module has no DocTestSuite class')
class DocTestFinder(doctest.DocTestFinder):
def __init__(self, *args, **kwargs):
self.skipped = kwargs.pop('skipped', ())
doctest.DocTestFinder.__init__(self, *args, **kwargs)
def _get_test(self, obj, name, module, globs, source_lines):
"""override default _get_test method to be able to skip tests
according to skipped attribute's value
"""
if getattr(obj, '__name__', '') in self.skipped:
return None
return doctest.DocTestFinder._get_test(self, obj, name, module,
globs, source_lines)
class DocTest(TestCase):
"""trigger module doctest
I don't know how to make unittest.main consider the DocTestSuite instance
without this hack
"""
skipped = ()
def __call__(self, result=None, runcondition=None, options=None):\
# pylint: disable=W0613
try:
finder = DocTestFinder(skipped=self.skipped)
suite = doctest.DocTestSuite(self.module, test_finder=finder)
# XXX iirk
doctest.DocTestCase._TestCase__exc_info = sys.exc_info
except AttributeError:
suite = SkippedSuite()
# doctest may gork the builtins dictionary
# This happens to the "_" entry used by gettext
old_builtins = builtins.__dict__.copy()
try:
return suite.run(result)
finally:
builtins.__dict__.clear()
builtins.__dict__.update(old_builtins)
run = __call__
def test(self):
"""just there to trigger test execution"""
MAILBOX = None
class MockSMTP:
"""fake smtplib.SMTP"""
def __init__(self, host, port):
self.host = host
self.port = port
global MAILBOX
self.received = MAILBOX = []
def set_debuglevel(self, debuglevel):
"""ignore debug level"""
def sendmail(self, fromaddr, toaddres, body):
"""push sent mail in the mailbox"""
self.received.append((fromaddr, toaddres, body))
def quit(self):
"""ignore quit"""
class MockConfigParser(configparser.ConfigParser):
"""fake ConfigParser.ConfigParser"""
def __init__(self, options):
configparser.ConfigParser.__init__(self)
for section, pairs in options.iteritems():
self.add_section(section)
for key, value in pairs.iteritems():
self.set(section, key, value)
def write(self, _):
raise NotImplementedError()
class MockConnection:
"""fake DB-API 2.0 connexion AND cursor (i.e. cursor() return self)"""
def __init__(self, results):
self.received = []
self.states = []
self.results = results
def cursor(self):
"""Mock cursor method"""
return self
def execute(self, query, args=None):
"""Mock execute method"""
self.received.append( (query, args) )
def fetchone(self):
"""Mock fetchone method"""
return self.results[0]
def fetchall(self):
"""Mock fetchall method"""
return self.results
def commit(self):
"""Mock commiy method"""
self.states.append( ('commit', len(self.received)) )
def rollback(self):
"""Mock rollback method"""
self.states.append( ('rollback', len(self.received)) )
def close(self):
"""Mock close method"""
pass
def mock_object(**params):
"""creates an object using params to set attributes
>>> option = mock_object(verbose=False, index=range(5))
>>> option.verbose
False
>>> option.index
[0, 1, 2, 3, 4]
"""
return type('Mock', (), params)()
def create_files(paths, chroot):
"""Creates directories and files found in <path>.
:param paths: list of relative paths to files or directories
:param chroot: the root directory in which paths will be created
>>> from os.path import isdir, isfile
>>> isdir('/tmp/a')
False
>>> create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], '/tmp')
>>> isdir('/tmp/a')
True
>>> isdir('/tmp/a/b/c')
True
>>> isfile('/tmp/a/b/c/d/e.py')
True
>>> isfile('/tmp/a/b/foo.py')
True
"""
dirs, files = set(), set()
for path in paths:
path = osp.join(chroot, path)
filename = osp.basename(path)
# path is a directory path
if filename == '':
dirs.add(path)
# path is a filename path
else:
dirs.add(osp.dirname(path))
files.add(path)
for dirpath in dirs:
if not osp.isdir(dirpath):
os.makedirs(dirpath)
for filepath in files:
open(filepath, 'w').close()
class AttrObject: # XXX cf mock_object
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def tag(*args, **kwargs):
"""descriptor adding tag to a function"""
def desc(func):
assert not hasattr(func, 'tags')
func.tags = Tags(*args, **kwargs)
return func
return desc
def require_version(version):
""" Compare version of python interpreter to the given one. Skip the test
if older.
"""
def check_require_version(f):
version_elements = version.split('.')
try:
compare = tuple([int(v) for v in version_elements])
except ValueError:
raise ValueError('%s is not a correct version : should be X.Y[.Z].' % version)
current = sys.version_info[:3]
if current < compare:
def new_f(self, *args, **kwargs):
self.skipTest('Need at least %s version of python. Current version is %s.' % (version, '.'.join([str(element) for element in current])))
new_f.__name__ = f.__name__
return new_f
else:
return f
return check_require_version
def require_module(module):
""" Check if the given module is loaded. Skip the test if not.
"""
def check_require_module(f):
try:
__import__(module)
return f
except ImportError:
def new_f(self, *args, **kwargs):
self.skipTest('%s can not be imported.' % module)
new_f.__name__ = f.__name__
return new_f
return check_require_module
|
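A small usage sketch of the with_tempdir decorator defined above, assuming logilab-common is importable: inside the decorated function, tempfile.gettempdir() points at a fresh private directory, which is removed again when the function returns.

import os
import tempfile
from logilab.common.testlib import with_tempdir

@with_tempdir
def build_scratch_file():
    # tempfile.tempdir has been redirected to a fresh, private directory here.
    path = os.path.join(tempfile.gettempdir(), "scratch.txt")
    with open(path, "w") as handle:
        handle.write("temporary data")
    return tempfile.gettempdir()

scratch_dir = build_scratch_file()
print(os.path.isdir(scratch_dir))  # False: the directory was removed on return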
BigBrother1984/android_external_chromium_org | refs/heads/kitkat | tools/telemetry/telemetry/page/cloud_storage.py | 23 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrappers for gsutil, for basic interaction with Google Cloud Storage."""
import cStringIO
import hashlib
import logging
import os
import subprocess
import sys
import tarfile
import urllib2
from telemetry.core import util
DEFAULT_BUCKET = 'chromium-wpr'
_GSUTIL_URL = 'http://storage.googleapis.com/pub/gsutil.tar.gz'
_DOWNLOAD_PATH = os.path.join(util.GetTelemetryDir(), 'third_party', 'gsutil')
class CloudStorageError(Exception):
pass
def _DownloadGsutil():
logging.info('Downloading gsutil')
response = urllib2.urlopen(_GSUTIL_URL)
with tarfile.open(fileobj=cStringIO.StringIO(response.read())) as tar_file:
tar_file.extractall(os.path.dirname(_DOWNLOAD_PATH))
logging.info('Downloaded gsutil to %s' % _DOWNLOAD_PATH)
return os.path.join(_DOWNLOAD_PATH, 'gsutil')
def _FindGsutil():
"""Return the gsutil executable path. If we can't find it, download it."""
search_paths = [_DOWNLOAD_PATH] + os.environ['PATH'].split(os.pathsep)
# Look for a depot_tools installation.
for path in search_paths:
gsutil_path = os.path.join(path, 'third_party', 'gsutil', 'gsutil')
if os.path.isfile(gsutil_path):
return gsutil_path
# Look for a gsutil installation.
for path in search_paths:
gsutil_path = os.path.join(path, 'gsutil')
if os.path.isfile(gsutil_path):
return gsutil_path
# Failed to find it. Download it!
return _DownloadGsutil()
def _RunCommand(args):
gsutil_path = _FindGsutil()
gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
raise CloudStorageError(stderr.splitlines()[-1])
return stdout
def List(bucket):
stdout = _RunCommand(['ls', 'gs://%s' % bucket])
return [url.split('/')[-1] for url in stdout.splitlines()]
def Delete(bucket, remote_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logging.info('Deleting %s' % url)
_RunCommand(['rm', url])
def Get(bucket, remote_path, local_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logging.info('Downloading %s to %s' % (url, local_path))
_RunCommand(['cp', url, local_path])
def Insert(bucket, remote_path, local_path):
url = 'gs://%s/%s' % (bucket, remote_path)
logging.info('Uploading %s to %s' % (local_path, url))
_RunCommand(['cp', local_path, url])
def GetIfChanged(bucket, file_path):
"""Gets the file at file_path if it has a hash file that doesn't match."""
hash_path = file_path + '.sha1'
if not os.path.exists(hash_path):
return
with open(hash_path, 'rb') as f:
expected_hash = f.read(1024).rstrip()
if not os.path.exists(file_path) or GetHash(file_path) != expected_hash:
Get(bucket, expected_hash, file_path)
def GetHash(file_path):
"""Calculates and returns the hash of the file at file_path."""
sha1 = hashlib.sha1()
with open(file_path, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
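

# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Shows the intended flow: keep a small .sha1 file next to a large binary and
# let GetIfChanged() download the blob from the default bucket only when the
# local copy is missing or stale. The file name below is a hypothetical example.
if __name__ == '__main__':
  example_path = os.path.join(util.GetTelemetryDir(), 'example_archive.wpr')
  # Expects an 'example_archive.wpr.sha1' file next to the target; without it,
  # GetIfChanged() returns immediately and downloads nothing.
  GetIfChanged(DEFAULT_BUCKET, example_path)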
|
himanshu219/django-pagination
|
refs/heads/master
|
pagination/paginator.py
|
36
|
from django.core.paginator import Paginator, Page, PageNotAnInteger, EmptyPage
class InfinitePaginator(Paginator):
"""
    Paginator designed for cases when it's not important to know how many total
    pages there are. This is useful for any object_list that has no count()
    method, and can improve performance on MySQL by avoiding count queries.
The orphans parameter has been removed for simplicity and there's a link
template string for creating the links to the next and previous pages.
"""
def __init__(self, object_list, per_page, allow_empty_first_page=True,
link_template='/page/%d/'):
orphans = 0 # no orphans
super(InfinitePaginator, self).__init__(object_list, per_page, orphans,
allow_empty_first_page)
# no count or num pages
del self._num_pages, self._count
# bonus links
self.link_template = link_template
def validate_number(self, number):
"""
Validates the given 1-based page number.
"""
try:
number = int(number)
except ValueError:
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
page_items = self.object_list[bottom:top]
# check moved from validate_number
if not page_items:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return InfinitePage(page_items, number, self)
def _get_count(self):
"""
Returns the total number of objects, across all pages.
"""
raise NotImplementedError
count = property(_get_count)
def _get_num_pages(self):
"""
Returns the total number of pages.
"""
raise NotImplementedError
num_pages = property(_get_num_pages)
def _get_page_range(self):
"""
Returns a 1-based range of pages for iterating through within
a template for loop.
"""
raise NotImplementedError
page_range = property(_get_page_range)
class InfinitePage(Page):
def __repr__(self):
return '<Page %s>' % self.number
def has_next(self):
"""
Checks for one more item than last on this page.
"""
try:
next_item = self.paginator.object_list[
self.number * self.paginator.per_page]
except IndexError:
return False
return True
def end_index(self):
"""
Returns the 1-based index of the last object on this page,
relative to total objects found (hits).
"""
return ((self.number - 1) * self.paginator.per_page +
len(self.object_list))
#Bonus methods for creating links
def next_link(self):
if self.has_next():
return self.paginator.link_template % (self.number + 1)
return None
def previous_link(self):
if self.has_previous():
return self.paginator.link_template % (self.number - 1)
return None
class FinitePaginator(InfinitePaginator):
"""
Paginator for cases when the list of items is already finite.
A good example is a list generated from an API call. This is a subclass
of InfinitePaginator because we have no idea how many items exist in the
full collection.
To accurately determine if the next page exists, a FinitePaginator MUST be
created with an object_list_plus that may contain more items than the
per_page count. Typically, you'll have an object_list_plus with one extra
item (if there's a next page). You'll also need to supply the offset from
the full collection in order to get the page start_index.
This is a very silly class but useful if you love the Django pagination
conventions.
"""
def __init__(self, object_list_plus, per_page, offset=None,
allow_empty_first_page=True, link_template='/page/%d/'):
super(FinitePaginator, self).__init__(object_list_plus, per_page,
allow_empty_first_page, link_template)
self.offset = offset
def validate_number(self, number):
        number = super(FinitePaginator, self).validate_number(number)
# check for an empty list to see if the page exists
if not self.object_list:
if number == 1 and self.allow_empty_first_page:
pass
else:
raise EmptyPage('That page contains no results')
return number
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
# remove the extra item(s) when creating the page
page_items = self.object_list[:self.per_page]
return FinitePage(page_items, number, self)
class FinitePage(InfinitePage):
def has_next(self):
"""
Checks for one more item than last on this page.
"""
try:
next_item = self.paginator.object_list[self.paginator.per_page]
except IndexError:
return False
return True
def start_index(self):
"""
Returns the 1-based index of the first object on this page,
relative to total objects in the paginator.
"""
## TODO should this holler if you haven't defined the offset?
return self.paginator.offset
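

# --- Illustrative usage sketch (an assumption, not part of this module) ---
# Pages through a plain list without ever computing a total count. It assumes
# the older Django versions this module targets, where Paginator stores the
# _count/_num_pages attributes that __init__ deletes above.
if __name__ == '__main__':
    paginator = InfinitePaginator(range(25), 10, link_template='/page/%d/')
    page = paginator.page(2)
    assert page.has_next() and page.next_link() == '/page/3/'
    assert page.end_index() == 20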
|
j-carl/ansible
|
refs/heads/devel
|
lib/ansible/errors/__init__.py
|
9
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors.yaml_strings import (
YAML_COMMON_DICT_ERROR,
YAML_COMMON_LEADING_TAB_ERROR,
YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR,
YAML_COMMON_UNBALANCED_QUOTES_ERROR,
YAML_COMMON_UNQUOTED_COLON_ERROR,
YAML_COMMON_UNQUOTED_VARIABLE_ERROR,
YAML_POSITION_DETAILS,
YAML_AND_SHORTHAND_ERROR,
)
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Sequence
class AnsibleError(Exception):
'''
This is the base class for all errors raised from Ansible code,
and can be instantiated with two optional parameters beyond the
error message to control whether detailed information is displayed
when the error occurred while parsing a data file of some kind.
Usage:
raise AnsibleError('some message here', obj=obj, show_content=True)
Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
which should be returned by the DataLoader() class.
'''
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None):
super(AnsibleError, self).__init__(message)
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
self._obj = obj
self._show_content = show_content
if obj and isinstance(obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error and not suppress_extended_error:
self.message = '%s\n\n%s' % (to_native(message), to_native(extended_error))
else:
self.message = '%s' % to_native(message)
else:
self.message = '%s' % to_native(message)
if orig_exc:
self.orig_exc = orig_exc
def __str__(self):
return self.message
def __repr__(self):
return self.message
def _get_error_lines_from_file(self, file_name, line_number):
'''
Returns the line in the file which corresponds to the reported error
location, as well as the line preceding it (if the error did not
occur on the first line), to provide context to the error.
'''
target_line = ''
prev_line = ''
with open(file_name, 'r') as f:
lines = f.readlines()
target_line = lines[line_number]
if line_number > 0:
prev_line = lines[line_number - 1]
return (target_line, prev_line)
def _get_extended_error(self):
'''
Given an object reporting the location of the exception in a file, return
detailed information regarding it including:
* the line which caused the error as well as the one preceding it
* causes and suggested remedies for common syntax errors
If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (i.e. vault data).
'''
error_message = ''
try:
(src_file, line_number, col_number) = self._obj.ansible_pos
error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
if src_file not in ('<string>', '<unicode>') and self._show_content:
(target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
target_line = to_text(target_line)
prev_line = to_text(prev_line)
if target_line:
stripped_line = target_line.replace(" ", "")
# Check for k=v syntax in addition to YAML syntax and set the appropriate error position,
# arrow index
if re.search(r'\w+(\s+)?=(\s+)?[\w/-]+', prev_line):
error_position = prev_line.rstrip().find('=')
arrow_line = (" " * error_position) + "^ here"
error_message = YAML_POSITION_DETAILS % (src_file, line_number - 1, error_position + 1)
error_message += "\nThe offending line appears to be:\n\n%s\n%s\n\n" % (prev_line.rstrip(), arrow_line)
error_message += YAML_AND_SHORTHAND_ERROR
else:
arrow_line = (" " * (col_number - 1)) + "^ here"
error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)
# TODO: There may be cases where there is a valid tab in a line that has other errors.
if '\t' in target_line:
error_message += YAML_COMMON_LEADING_TAB_ERROR
# common error/remediation checking here:
# check for unquoted vars starting lines
if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
# check for common dictionary mistakes
elif ":{{" in stripped_line and "}}" in stripped_line:
error_message += YAML_COMMON_DICT_ERROR
# check for common unquoted colon mistakes
elif (len(target_line) and
len(target_line) > 1 and
len(target_line) > col_number and
target_line[col_number] == ":" and
target_line.count(':') > 1):
error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
# otherwise, check for some common quoting mistakes
else:
# FIXME: This needs to split on the first ':' to account for modules like lineinfile
# that may have lines that contain legitimate colons, e.g., line: 'i ALL= (ALL) NOPASSWD: ALL'
# and throw off the quote matching logic.
parts = target_line.split(":")
if len(parts) > 1:
middle = parts[1].strip()
match = False
unbalanced = False
if middle.startswith("'") and not middle.endswith("'"):
match = True
elif middle.startswith('"') and not middle.endswith('"'):
match = True
if (len(middle) > 0 and
middle[0] in ['"', "'"] and
middle[-1] in ['"', "'"] and
target_line.count("'") > 2 or
target_line.count('"') > 2):
unbalanced = True
if match:
error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
if unbalanced:
error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR
except (IOError, TypeError):
error_message += '\n(could not open file to display line)'
except IndexError:
error_message += '\n(specified line no longer in file, maybe it changed?)'
return error_message
class AnsibleAssertionError(AnsibleError, AssertionError):
'''Invalid assertion'''
pass
class AnsibleOptionsError(AnsibleError):
''' bad or incomplete options passed '''
pass
class AnsibleParserError(AnsibleError):
''' something was detected early that is wrong about a playbook or data file '''
pass
class AnsibleInternalError(AnsibleError):
''' internal safeguards tripped, something happened in the code that should never happen '''
pass
class AnsibleRuntimeError(AnsibleError):
''' ansible had a problem while running a playbook '''
pass
class AnsibleModuleError(AnsibleRuntimeError):
''' a module failed somehow '''
pass
class AnsibleConnectionFailure(AnsibleRuntimeError):
''' the transport / connection_plugin had a fatal error '''
pass
class AnsibleAuthenticationFailure(AnsibleConnectionFailure):
'''invalid username/password/key'''
pass
class AnsibleCallbackError(AnsibleRuntimeError):
''' a callback failure '''
pass
class AnsibleTemplateError(AnsibleRuntimeError):
'''A template related error'''
pass
class AnsibleFilterError(AnsibleTemplateError):
''' a templating failure '''
pass
class AnsibleLookupError(AnsibleTemplateError):
''' a lookup failure '''
pass
class AnsibleUndefinedVariable(AnsibleTemplateError):
''' a templating failure '''
pass
class AnsibleFileNotFound(AnsibleRuntimeError):
''' a file missing failure '''
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, paths=None, file_name=None):
self.file_name = file_name
self.paths = paths
if message:
message += "\n"
if self.file_name:
message += "Could not find or access '%s'" % to_text(self.file_name)
else:
message += "Could not find file"
if self.paths and isinstance(self.paths, Sequence):
searched = to_text('\n\t'.join(self.paths))
if message:
message += "\n"
message += "Searched in:\n\t%s" % searched
message += " on the Ansible Controller.\nIf you are using a module and expect the file to exist on the remote, see the remote_src option"
super(AnsibleFileNotFound, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
# These Exceptions are temporary, using them as flow control until we can get a better solution.
# DO NOT USE as they will probably be removed soon.
# We will port the action modules in our tree to use a context manager instead.
class AnsibleAction(AnsibleRuntimeError):
''' Base Exception for Action plugin flow control '''
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleAction, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc)
if result is None:
self.result = {}
else:
self.result = result
class AnsibleActionSkip(AnsibleAction):
''' an action runtime skip'''
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleActionSkip, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
self.result.update({'skipped': True, 'msg': message})
class AnsibleActionFail(AnsibleAction):
''' an action runtime failure'''
def __init__(self, message="", obj=None, show_content=True, suppress_extended_error=False, orig_exc=None, result=None):
super(AnsibleActionFail, self).__init__(message=message, obj=obj, show_content=show_content,
suppress_extended_error=suppress_extended_error, orig_exc=orig_exc, result=result)
self.result.update({'failed': True, 'msg': message})
class _AnsibleActionDone(AnsibleAction):
''' an action runtime early exit'''
pass
class AnsiblePluginError(AnsibleError):
''' base class for Ansible plugin-related errors that do not need AnsibleError contextual data '''
def __init__(self, message=None, plugin_load_context=None):
super(AnsiblePluginError, self).__init__(message)
self.plugin_load_context = plugin_load_context
class AnsiblePluginRemovedError(AnsiblePluginError):
''' a requested plugin has been removed '''
pass
class AnsiblePluginCircularRedirect(AnsiblePluginError):
'''a cycle was detected in plugin redirection'''
pass
class AnsibleCollectionUnsupportedVersionError(AnsiblePluginError):
'''a collection is not supported by this version of Ansible'''
pass
class AnsibleFilterTypeError(AnsibleTemplateError, TypeError):
''' a Jinja filter templating failure due to bad type'''
pass
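

# --- Illustrative usage sketch (an assumption, not part of this module) ---
# Shows the plain (non-YAML) path through AnsibleError: with no
# AnsibleBaseYAMLObject attached, the message is simply converted with
# to_native() and no extended file context is appended.
if __name__ == '__main__':
    try:
        raise AnsibleOptionsError('missing required option: --example')
    except AnsibleError as err:
        assert str(err) == 'missing required option: --example'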
|
rep/certificate-transparency
|
refs/heads/master
|
cpp/client/fix-chain.py
|
34
|
#!/usr/bin/env python
# Given a certificate chain that the log won't accept, try to fix it up
# into one that will be accepted.
# Based on pyasn1 example code.
from base64 import b64encode
from ct.crypto.pem import PemError
from ct.crypto.pem import from_pem
from pyasn1 import debug
# Why doesn't this work?
#from pyasn1.codec.ber import stDumpRawValue
from pyasn1.codec.der import decoder
from pyasn1.codec.der import encoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc2459
from pyasn1_modules import rfc2315
import sys
from urllib2 import urlopen
if len(sys.argv) != 2:
print """Usage:
$ %s somecertificates.pem""" % sys.argv[0]
sys.exit(-1)
cStart = '-----BEGIN CERTIFICATE-----'
cEnd = '-----END CERTIFICATE-----'
certType = rfc2459.Certificate()
# RFC 2459 is not sufficient for X509v3 certificates, extra stuff here.
# RFC 5280 4.2.2.1
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
class AccessDescription(univ.Sequence):
"""
AccessDescription ::= SEQUENCE {
accessMethod OBJECT IDENTIFIER,
accessLocation GeneralName }
"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', rfc2459.GeneralName()))
class AuthorityInfoAccessSyntax(univ.SequenceOf):
"""
AuthorityInfoAccessSyntax ::=
SEQUENCE SIZE (1..MAX) OF AccessDescription
"""
# FIXME: SIZE not encoded.
componentType = AccessDescription()
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
# End of RFC 5280 4.2.2.1
def getIssuersFromAIA(cert):
tbs = cert.getComponentByName('tbsCertificate')
extensions = tbs.getComponentByName('extensions') or []
allIssuers = []
for extension in extensions:
oid = extension.getComponentByName('extnID')
if oid != id_pe_authorityInfoAccess:
continue
print extension.prettyPrint()
value, rest = decoder.decode(extension.getComponentByName('extnValue'),
asn1Spec=univ.OctetString())
assert rest == ""
aia, rest = decoder.decode(value, asn1Spec=AuthorityInfoAccessSyntax())
assert rest == ""
print aia.prettyPrint()
for ad in aia:
oid = ad.getComponentByName('accessMethod')
if oid != id_ad_caIssuers:
continue
print ad.prettyPrint()
loc = ad.getComponentByName('accessLocation').\
getComponentByName('uniformResourceIdentifier')
print type(loc), loc
certHandle = urlopen(str(loc))
# RFC 5280 says this should either be 'application/pkix-cert' or
# 'application/pkcs7-mime' (in which case the result should be a
# "certs-only" PCKS#7 response, as specified in RFC 2797). Of
# course, we see other values, so just try both formats.
print certHandle.info().gettype()
issuer = certHandle.read()
# Have we got an (incorrect, but let's fix it) PEM encoded cert?
if issuer.startswith('-----'):
try:
(issuer, _) = from_pem(issuer, ['CERTIFICATE'])
except PemError as e:
print "PEM decode failed:", e
print "For cert:", issuer
# Is it a certificate?
try:
cert, rest = decoder.decode(issuer, asn1Spec=certType)
assert rest == ""
allIssuers.append(cert)
continue
except PyAsn1Error as e:
# On failure, try the next thing
print "Cert decode failed:", e
pass
# If not, it had better be PKCS#7 "certs-only"
try:
pkcs7, rest = decoder.decode(issuer, asn1Spec=rfc2315.ContentInfo())
assert rest == ""
assert pkcs7.getComponentByName('contentType') == rfc2315.signedData
signedData = decoder.decode(pkcs7.getComponentByName('content'),
asn1Spec=rfc2315.SignedData())
except PyAsn1Error as e:
# Give up
print "PKCS#7 decode also failed:", e
print "Skipping issuer URL:", loc
continue
for signedDatum in signedData:
# FIXME: why does this happen? Example is at
# http://crt.usertrust.com/AddTrustExternalCARoot.p7c.
if signedDatum == '':
print "** Skipping strange Any('') in PKCS7 **"
continue
certs = signedDatum.getComponentByName('certificates')
for c in certs:
cert = c.getComponentByName('certificate')
allIssuers.append(cert)
return allIssuers
# Note that this is a non-standard encoding of the DN, but unlike the
# standard encoding it captures nesting information. That is,
# attributes that are within a single RelativeDistinguishedName are
# surrounded by [].
def DNToString(dn):
rdns = dn.getComponent()
ret = ''
for rdn in rdns:
ret += '['
for attr in rdn:
attrType = attr.getComponentByName('type')
if attrType == rfc2459.emailAddress:
val, rest = decoder.decode(attr.getComponentByName('value'),
asn1Spec=rfc2459.Pkcs9email())
assert rest == ""
# Strictly speaking, this is IA5, not ASCII.
val = str(val).decode('ascii')
else:
val, rest = decoder.decode(attr.getComponentByName('value'),
asn1Spec=rfc2459.X520name())
assert rest == ""
valt = val.getName()
val = val.getComponent()
if valt == 'printableString':
val = str(val)
elif valt == 'teletexString':
# Strictly this is a T.61 string. T.61 no longer exists as a
# standard and some certs mark ISO 8859-1 as
# teletexString. And we should never see this, but we do.
val = str(val).decode('iso8859-1')
elif valt == 'utf8String':
val = str(val)
else:
print valt
assert False
assert val is not None
ret += '/' + str(attrType) + '=' + val
ret += ']'
return ret
certs = {}
inChain = []
certfile = open(sys.argv[1])
while 1:
idx, substrate = pem.readPemBlocksFromFile(certfile, (cStart, cEnd))
if not substrate:
break
cert, rest = decoder.decode(substrate, asn1Spec=certType)
assert rest == ""
tbs = cert.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print DNToString(subjectDN)
certs[DNToString(subjectDN)] = cert
inChain.append(cert)
#for subject, cert in certs.iteritems():
# print subject
# Assume the first cert in the chain is the final cert
outChain = [inChain[0]]
while True:
assert len(outChain) < 100
cert = outChain[-1]
tbs = cert.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print 'subject:', DNToString(subjectDN)
issuerDN = tbs.getComponentByName('issuer')
#print issuerDN.prettyPrint()
issuerDNstr = DNToString(issuerDN)
print 'issuer:', issuerDNstr
print
if issuerDN == subjectDN:
break
if issuerDNstr in certs:
issuer = certs[issuerDNstr]
else:
issuers = getIssuersFromAIA(cert)
if len(issuers) == 0:
print "Can't get issuer, giving up"
break
issuer = None
for i in issuers:
tbs = i.getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
print 'issuer subject:', DNToString(subjectDN)
if subjectDN == issuerDN:
issuer = i
break
assert issuer is not None
outChain.append(issuer)
if len(outChain) == 1:
tbs = outChain[0].getComponentByName('tbsCertificate')
subjectDN = tbs.getComponentByName('subject')
issuerDN = tbs.getComponentByName('issuer')
if subjectDN == issuerDN:
print "Chain consists of 1 self-signed certificate"
exit(1)
for cert in outChain:
print cStart
b64 = b64encode(encoder.encode(cert))
for n in range(0, len(b64), 64):
print b64[n:n+64]
print cEnd
print('*** %d PEM cert(s) deserialized, fixed chain is %d long' % (
len(inChain),
len(outChain)))
|
storm-computers/odoo
|
refs/heads/9.0
|
addons/website_customer/__init__.py
|
1023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import models
|
WangRobo/rosbridge_suite
|
refs/heads/develop
|
rosbridge_server/src/tornado/__init__.py
|
15
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, print_function, with_statement
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "4.0.2"
version_info = (4, 0, 2, 0)
|
crsilveira/odoo-brazil-banking
|
refs/heads/8.0
|
l10n_br_account_payment_boleto/reports/report.py
|
3
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Payment Boleto module for Odoo
# Copyright (C) 2012-2015 KMEE (http://www.kmee.com.br)
# @author Luis Felipe Miléo <mileo@kmee.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from __future__ import with_statement
from openerp.report.render import render
from openerp.report.interface import report_int
from openerp import pooler
from ..boleto.document import Boleto
from openerp.osv import osv
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type = 'pdf'
def _render(self):
return self.pdf
class report_custom(report_int):
"""
Custom report for return boletos
"""
def create(self, cr, uid, ids, datas, context=False):
if not context:
context = {}
active_ids = context.get('active_ids')
active_model = context.get('active_model')
pool = pooler.get_pool(cr.dbname)
ids_move_lines = []
aml_obj = pool.get('account.move.line')
if active_model == 'account.invoice':
ai_obj = pool.get('account.invoice')
for account_invoice in ai_obj.browse(cr, uid, active_ids):
for move_line in account_invoice.move_line_receivable_id:
ids_move_lines.append(move_line.id)
elif active_model == 'account.move.line':
ids_move_lines = active_ids
else:
return False
boleto_list = aml_obj.send_payment(cr, uid, ids_move_lines)
if not boleto_list:
raise osv.except_osv(
'Error !', ('Não é possível gerar os boletos\n'
'Certifique-se que a fatura esteja confirmada e o forma de pagamento seja duplicatas'))
pdf_string = Boleto.get_pdfs(boleto_list)
self.obj = external_pdf(pdf_string)
self.obj.render()
return self.obj.pdf, 'pdf'
report_custom('report.l10n_br_account_payment_boleto.report')
|
manuelep/openshift_v3_test
|
refs/heads/master
|
wsgi/web2py/handlers/wsgihandler.py
|
25
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a WSGI handler for Apache
Requires apache+mod_wsgi.
In httpd.conf put something like:
LoadModule wsgi_module modules/mod_wsgi.so
WSGIScriptAlias / /path/to/wsgihandler.py
"""
# change these parameters as required
LOGGING = False
SOFTCRON = False
import sys
import os
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
if not os.path.isdir('applications'):
raise RuntimeError('Running from the wrong folder')
sys.path = [path] + [p for p in sys.path if not p == path]
sys.stdout = sys.stderr
import gluon.main
if LOGGING:
application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
logfilename='httpserver.log',
profiler_dir=None)
else:
application = gluon.main.wsgibase
if SOFTCRON:
from gluon.settings import global_settings
global_settings.web2py_crontype = 'soft'
|
bholley/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/webdriver/navigation/refresh-page.py
|
200
|
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(os.path.join(__file__, "../..")))
import base_test
class RefreshPageTest(base_test.WebDriverBaseTest):
# Get a static page that must be the same upon refresh
def test_refreshPage(self):
self.driver.get(self.webserver.where_is('navigation/res/refreshPageStatic.html'))
body = self.driver.find_element_by_css("body").text
self.driver.execute_script("document.getElementById('body').innerHTML=''")
self.driver.refresh()
newbody = self.driver.find_element_by_css("body").text
self.assertEqual(body, newbody)
self.driver.get(self.webserver.where_is('navigation/res/refreshPageDynamic.html'))
body = self.driver.find_element_by_css("body").text
self.driver.refresh()
newbody = self.driver.find_element_by_css("body").text
self.assertNotEqual(body, newbody)
if __name__ == '__main__':
unittest.main()
|
johnttaylor/Outcast
|
refs/heads/master
|
bin/ceres.py
|
1
|
#! /usr/bin/env python3
"""
Ceres is an Outcast tool for managing Outcast Universes
===============================================================================
usage: ceres [options] <command> [<args>...]
ceres [--qry]
ceres [--help]
Options:
    --user USER      Set the user id (overrides the environment variable
                     settings for the user id).
    --passwd PASS    Password for the associated user id
    -w WPATH         Explicitly sets the path of the workspace root
                     directory. The default behavior is to auto-detect the
                     workspace root directory.
    --uverse UPATH   Path of the package universe directory (overrides the
                     environment OUTCAST_PKGS_UVERSE setting).
    --utype UTYPE    Selects the type of Universe (overrides the
                     environment OUTCAST_UVERSE_TYPE setting). The default
                     Universe type is: git.
    --now EPOCHSEC   Explicitly provides the 'timestamp' for the distribute
                     operation. The default is to use the current time when
                     the command is executed.
    --qry            List the supported Universe types.
    -q               Suppresses warning messages
    -v               Be verbose
    -h, --help       Display help for common options/usage
Type 'ceres help <command>' for help on a specific command.
"""
import sys
import os
from subprocess import call
from docopt.docopt import docopt
import utils
from my_globals import CERES_VERSION
from my_globals import OUTCAST_UVERSE_TYPE
#------------------------------------------------------------------------------
def load_command( utype, name ):
try:
command_module = __import__("uverse_types.{}.{}".format(utype, name), fromlist=[utype])
except ImportError:
exit("%r is not a Ceres command. Use 'ceres help' for list of commands." % name)
return command_module
#------------------------------------------------------------------------------
def display_command_list(utype):
import pkgutil
p = __import__("uverse_types.git",fromlist=['git'] )
print( ' ' )
print( "Type 'ceres help <command>' for details. Type 'ceres --help' for base usage." )
print( "-------------------------------------------------------------------------------" )
for importer, modname, ispkg in pkgutil.iter_modules(p.__path__):
if ( not ispkg ):
cmd = load_command( utype, modname )
cmd.display_summary()
print( ' ' )
def display_uverse_types_list():
print( ' ' )
print( "Type 'ceres --help' for additional help." )
print( "-------------------------------------------------------------------------------" )
bpath = os.path.join( os.path.dirname(__file__), 'uverse_types' )
if ( os.path.exists( bpath ) ):
files = os.listdir(bpath)
for f in files:
if ( os.path.isdir(os.path.join(bpath,f)) ):
print(f)
print( ' ' )
#------------------------------------------------------------------------------
# Parse command line
args = docopt(__doc__, version=CERES_VERSION(), options_first=True )
# Display the list of supported Universe types
if ( args['--qry'] ):
display_uverse_types_list()
else:
# Determine the type of universe, aka which command set to use
utype = os.environ.get( OUTCAST_UVERSE_TYPE() )
if ( utype == None ):
utype = 'git'
if ( args['--utype'] ):
utype = args['--utype']
# Trap help on a specific command
if ( args['<command>'] == 'help' ):
# Display list of commands if none specified
if ( args['<args>'] == [] ):
display_command_list(utype)
# Display command specific help
else:
load_command( utype, args['<args>'][0] ).run( args, ['--help'] )
# Trap no command specified
elif ( args['<command>'] == None ):
docopt(__doc__,argv=['--help'])
# Run the command (if it exists)
else:
        # Set quiet & verbose modes
utils.set_quite_mode( args['-q'] )
utils.set_verbose_mode( args['-v'] )
        # Handle the exception case(s) for whether the environment variables need to be set
skipenv = False
if ( args['<command>'] == 'set' ):
skipenv = True
# Housekeeping
utils.set_user_name( args, skipenv )
utils.set_password( args )
utils.set_uverse( args, skipenv )
utils.set_workspace( args )
# run the command
load_command( utype, args['<command>'] ).run( args, [args['<command>']] + args['<args>'] )
|
johnbachman/indra
|
refs/heads/master
|
indra/databases/taxonomy_client.py
|
4
|
"""Client to access the Entrez Taxonomy web service."""
import requests
base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
def _send_search_request(term):
params = {
'db': 'taxonomy',
'term': term,
'retmode': 'json'
}
res = requests.get(base_url, params=params)
if not res.status_code == 200:
return None
return res.json().get('esearchresult')
def get_taxonomy_id(name):
"""Return the taxonomy ID corresponding to a taxonomy name.
Parameters
----------
name : str
The name of the taxonomy entry.
Example: "Severe acute respiratory syndrome coronavirus 2"
Returns
-------
str or None
The taxonomy ID corresponding to the given name or None
if not available.
"""
    res = _send_search_request(name)
    if not res:
        return None
    idlist = res.get('idlist')
if not idlist:
return None
return idlist[0]
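

# --- Illustrative usage sketch (an assumption, not part of this module) ---
# Performs a live Entrez lookup, so it needs network access; the organism name
# is the same example already given in the docstring above.
if __name__ == '__main__':
    taxid = get_taxonomy_id('Severe acute respiratory syndrome coronavirus 2')
    print('Taxonomy ID: %s' % taxid)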
|
binarydud/django-oscar
|
refs/heads/master
|
tests/integration/offer/status_tests.py
|
50
|
from decimal import Decimal as D
from django.test import TestCase
from oscar.core.loading import get_model
from oscar.test.factories import ConditionalOfferFactory
class TestAnOfferChangesStatusWhen(TestCase):
def setUp(self):
ConditionalOffer = get_model('offer', 'ConditionalOffer')
self.offer = ConditionalOfferFactory(
offer_type=ConditionalOffer.SITE)
def test_the_max_discount_is_exceeded(self):
self.offer.max_discount = D('10.00')
self.assertTrue(self.offer.is_open)
# Now bump the total discount and save to see if the status is
# automatically updated.
self.offer.total_discount += D('20.00')
self.offer.save()
self.assertFalse(self.offer.is_open)
def test_the_max_global_applications_is_exceeded(self):
self.offer.max_global_applications = 5
self.assertTrue(self.offer.is_open)
self.offer.num_applications += 10
self.offer.save()
self.assertFalse(self.offer.is_open)
|
tinchoss/Python_Android
|
refs/heads/master
|
python/src/Lib/plat-os2emx/SOCKET.py
|
134
|
# Generated by h2py from f:/emx/include/sys/socket.h
# Included from sys/types.h
FD_SETSIZE = 256
# Included from sys/uio.h
FREAD = 1
FWRITE = 2
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_L_BROADCAST = 0x0200
SO_RCV_SHUTDOWN = 0x0400
SO_SND_SHUTDOWN = 0x0800
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_OPTIONS = 0x1010
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_UNIX = 1
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_NBS = 7
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_NB = 17
AF_NETBIOS = AF_NB
AF_OS2 = AF_UNIX
AF_MAX = 18
PF_UNSPEC = AF_UNSPEC
PF_UNIX = AF_UNIX
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_NBS = AF_NBS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_NB = AF_NB
PF_NETBIOS = AF_NB
PF_OS2 = AF_UNIX
PF_MAX = AF_MAX
SOMAXCONN = 5
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_MAXIOVLEN = 16
SCM_RIGHTS = 0x01
MT_FREE = 0
MT_DATA = 1
MT_HEADER = 2
MT_SOCKET = 3
MT_PCB = 4
MT_RTABLE = 5
MT_HTABLE = 6
MT_ATABLE = 7
MT_SONAME = 8
MT_ZOMBIE = 9
MT_SOOPTS = 10
MT_FTABLE = 11
MT_RIGHTS = 12
MT_IFADDR = 13
MAXSOCKETS = 2048
|
toenuff/treadmill
|
refs/heads/master
|
lib/python/treadmill/syscall/inotify.py
|
1
|
"""Linux inotify(7) API wrapper module
"""
import collections
import logging
import operator
import os
import struct
import ctypes
from ctypes import (
c_int,
c_char_p,
c_uint32,
)
from ctypes.util import find_library
import enum
_LOGGER = logging.getLogger(__name__)
###############################################################################
# Map the C interface
_LIBC_PATH = find_library('c')
_LIBC = ctypes.CDLL(_LIBC_PATH, use_errno=True)
if any([getattr(_LIBC, func_name, None) is None
for func_name in ['inotify_init1',
'inotify_add_watch',
'inotify_rm_watch']]):
raise ImportError('Unsupported libc version found: %s' % _LIBC_PATH)
###############################################################################
# int inotify_init1(int flags);
_INOTIFY_INIT1_DECL = ctypes.CFUNCTYPE(c_int, c_int, use_errno=True)
_INOTIFY_INIT1 = _INOTIFY_INIT1_DECL(('inotify_init1', _LIBC))
def inotify_init(flags=0):
"""Initializes a new inotify instance and returns a file descriptor
associated with a new inotify event queue.
:param ``INInitFlags`` flags:
Optional flag to control the inotify_init behavior.
"""
fileno = _INOTIFY_INIT1(flags)
if fileno < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_init1(%r)' % flags)
return fileno
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
class INInitFlags(enum.IntEnum):
"""Flags supported by inotify_init(2)."""
#: Set the O_NONBLOCK file status flag on the new open file description.
#: Using this flag saves extra calls to fcntl(2) to achieve the same
#: result.
NONBLOCK = 04000
#: Set the close-on-exec (FD_CLOEXEC) flag on the new file descriptor. See
#: the description of the O_CLOEXEC flag in open(2) for reasons why this
#: may be useful.
CLOEXEC = 02000000
IN_NONBLOCK = INInitFlags.NONBLOCK
IN_CLOEXEC = INInitFlags.CLOEXEC
###############################################################################
# int inotify_add_watch(int fileno, const char *pathname, uint32_t mask);
_INOTIFY_ADD_WATCH_DECL = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32,
use_errno=True)
_INOTIFY_ADD_WATCH = _INOTIFY_ADD_WATCH_DECL(('inotify_add_watch', _LIBC))
def inotify_add_watch(fileno, path, mask):
"""Add a watch to an initialized inotify instance."""
watch_id = _INOTIFY_ADD_WATCH(fileno, path, mask)
if watch_id < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_add_watch(%r, %r, %r)' % (fileno, path, mask))
return watch_id
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
class INAddWatchFlags(enum.IntEnum):
"""Special flags for inotify_add_watch.
"""
#: Do not follow a symbolic link.
DONT_FOLLOW = 0x02000000
#: Add to the mask of an existing watch.
MASK_ADD = 0x20000000
#: Only send event once.
ONESHOT = 0x80000000
#: Only watch the path if it's a directory.
ONLYDIR = 0x01000000
IN_DONT_FOLLOW = INAddWatchFlags.DONT_FOLLOW
IN_MASK_ADD = INAddWatchFlags.MASK_ADD
IN_ONESHOT = INAddWatchFlags.ONESHOT
IN_ONLYDIR = INAddWatchFlags.ONLYDIR
###############################################################################
# int inotify_rm_watch(int fileno, uint32_t wd);
_INOTIFY_RM_WATCH_DECL = ctypes.CFUNCTYPE(c_int, c_int, c_uint32,
use_errno=True)
_INOTIFY_RM_WATCH = _INOTIFY_RM_WATCH_DECL(('inotify_rm_watch', _LIBC))
def inotify_rm_watch(fileno, watch_id):
"""Remove an existing watch from an inotify instance."""
res = _INOTIFY_RM_WATCH(fileno, watch_id)
if res < 0:
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno),
'inotify_rm_watch(%r, %r)' % (fileno, watch_id))
INOTIFY_EVENT_HDRSIZE = struct.calcsize('iIII')
###############################################################################
def _parse_buffer(event_buffer):
"""Parses an inotify event buffer of ``inotify_event`` structs read from
the inotify socket.
The inotify_event structure looks like this::
struct inotify_event {
__s32 wd; /* watch descriptor */
__u32 mask; /* watch mask */
__u32 cookie; /* cookie to synchronize two events */
__u32 len; /* length (including nulls) of name */
char name[0]; /* stub for possible name */
};
The ``cookie`` member of this struct is used to pair two related
events, for example, it pairs an IN_MOVED_FROM event with an
IN_MOVED_TO event.
"""
while len(event_buffer) >= INOTIFY_EVENT_HDRSIZE:
wd, mask, cookie, length = struct.unpack_from('iIII', event_buffer, 0)
name = event_buffer[
INOTIFY_EVENT_HDRSIZE:
INOTIFY_EVENT_HDRSIZE + length
]
name = name.rstrip('\x00')
event_buffer = event_buffer[INOTIFY_EVENT_HDRSIZE + length:]
yield wd, mask, cookie, name
assert len(event_buffer) == 0, ('Unparsed bytes left in buffer: %r' %
event_buffer)
###############################################################################
# Constants copied from sys/inotify.h
#
# See man inotify(7) for more details.
#
# Constants related to inotify. See man inotify(7) and sys/inotify.h
class INEvent(enum.IntEnum):
"""Inotify events.
"""
# Events triggered by user-space
#: File was accessed.
ACCESS = 0x00000001
#: Meta-data changed.
ATTRIB = 0x00000004
#: Unwritable file closed.
CLOSE_NOWRITE = 0x00000010
#: Writable file was closed.
CLOSE_WRITE = 0x00000008
#: Subfile was created.
CREATE = 0x00000100
#: Subfile was deleted.
DELETE = 0x00000200
#: Self was deleted.
DELETE_SELF = 0x00000400
#: File was modified.
MODIFY = 0x00000002
#: File was moved from X.
MOVED_FROM = 0x00000040
#: File was moved to Y.
MOVED_TO = 0x00000080
#: Self was moved.
MOVE_SELF = 0x00000800
#: File was opened.
OPEN = 0x00000020
# Events sent by the kernel
#: File was ignored.
IGNORED = 0x00008000
#: Event occurred against directory.
ISDIR = 0x40000000
#: Event queued overflowed.
Q_OVERFLOW = 0x00004000
#: Backing file system was unmounted.
UNMOUNT = 0x00002000
IN_ACCESS = INEvent.ACCESS
IN_ATTRIB = INEvent.ATTRIB
IN_CLOSE_NOWRITE = INEvent.CLOSE_NOWRITE
IN_CLOSE_WRITE = INEvent.CLOSE_WRITE
IN_CREATE = INEvent.CREATE
IN_DELETE = INEvent.DELETE
IN_DELETE_SELF = INEvent.DELETE_SELF
IN_MODIFY = INEvent.MODIFY
IN_MOVED_FROM = INEvent.MOVED_FROM
IN_MOVED_TO = INEvent.MOVED_TO
IN_MOVE_SELF = INEvent.MOVE_SELF
IN_OPEN = INEvent.OPEN
IN_IGNORED = INEvent.IGNORED
IN_ISDIR = INEvent.ISDIR
IN_Q_OVERFLOW = INEvent.Q_OVERFLOW
IN_UNMOUNT = INEvent.UNMOUNT
# Helper values for user-space events
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE
IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO
# All user-space events.
IN_ALL_EVENTS = reduce(operator.or_, [
IN_ACCESS,
IN_ATTRIB,
IN_CLOSE_NOWRITE,
IN_CLOSE_WRITE,
IN_CREATE,
IN_DELETE,
IN_DELETE_SELF,
IN_MODIFY,
IN_MOVED_FROM,
IN_MOVED_TO,
IN_MOVE_SELF,
IN_OPEN,
])
def _fmt_mask(mask):
"""Parse an Inotify event mask into indivitual event flags."""
masks = []
# Non-iterable value INEvent is used in an iterating context
for event in INEvent: # pylint: disable=E1133
if mask & event:
masks.append(event.name)
mask ^= event
if mask:
masks.append(hex(mask))
return masks
###############################################################################
# High level Python API
# W0232: No __init__
# E1001: This is not an oldstyle class
# pylint: disable=E1001,W0232
class InotifyEvent(collections.namedtuple('InotifyEvent',
'wd mask cookie src_path')):
"""
Inotify event struct wrapper.
:param wd:
Watch descriptor
:param mask:
Event mask
:param cookie:
Event cookie
:param src_path:
Event source path
"""
__slots__ = ()
@property
def is_modify(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MODIFY)
@property
def is_close_write(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CLOSE_WRITE)
@property
def is_close_nowrite(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CLOSE_NOWRITE)
@property
def is_access(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ACCESS)
@property
def is_delete(self):
"""Test mask shorthand."""
return bool(self.mask & IN_DELETE)
@property
def is_delete_self(self):
"""Test mask shorthand."""
return bool(self.mask & IN_DELETE_SELF)
@property
def is_create(self):
"""Test mask shorthand."""
return bool(self.mask & IN_CREATE)
@property
def is_moved_from(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVED_FROM)
@property
def is_moved_to(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVED_TO)
@property
def is_move(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVE)
@property
def is_move_self(self):
"""Test mask shorthand."""
return bool(self.mask & IN_MOVE_SELF)
@property
def is_attrib(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ATTRIB)
@property
def is_ignored(self):
"""Test mask shorthand."""
return bool(self.mask & IN_IGNORED)
@property
def is_directory(self):
"""Test mask shorthand."""
return bool(self.mask & IN_ISDIR)
def __repr__(self):
masks = _fmt_mask(self.mask)
return ('<InotifyEvent: src_path=%s, wd=%d, mask=%s, cookie=%d>') % (
self.src_path,
self.wd,
'|'.join(masks),
self.cookie,
)
DEFAULT_NUM_EVENTS = 2048
DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * INOTIFY_EVENT_HDRSIZE
DEFAULT_EVENTS = IN_ALL_EVENTS
class Inotify(object):
"""Inotify system interface."""
def __init__(self, flags):
"""Initialize a new Inotify object.
"""
# The file descriptor associated with the inotify instance.
inotify_fd = inotify_init(flags)
self._inotify_fd = inotify_fd
self._paths = {}
def fileno(self):
"""The file descriptor associated with the inotify instance."""
return self._inotify_fd
def close(self):
"""Close the inotify filedescriptor.
        NOTE: After calling this, this object will be unusable.
"""
os.close(self._inotify_fd)
def add_watch(self, path, event_mask=DEFAULT_EVENTS):
"""
Adds a watch for the given path to monitor events specified by the
mask.
:param path:
Path to monitor
:type path:
``str``
:param event_mask:
*optional* Bit mask of the request events.
:type event_mask:
``int``
:returns:
Unique watch descriptor identifier
:rtype:
``int``
"""
path = os.path.normpath(path)
watch_id = inotify_add_watch(
self._inotify_fd,
path,
event_mask | IN_MASK_ADD
)
self._paths[watch_id] = path
return watch_id
def remove_watch(self, watch_id):
"""
Removes a watch.
:param watch_id:
Watch descriptor returned by :meth:`~Inotify.add_watch`
:type watch_id:
``int``
:returns:
``None``
"""
inotify_rm_watch(self._inotify_fd, watch_id)
def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
"""
Reads events from inotify and yields them.
:param event_buffer_size:
*optional* Buffer size while reading the inotify socket
:type event_buffer_size:
``int``
:returns:
List of :class:`InotifyEvent` instances
:rtype:
``list``
"""
if not self._paths:
return []
event_buffer = os.read(self._inotify_fd, event_buffer_size)
event_list = []
for wd, mask, cookie, name in _parse_buffer(event_buffer):
wd_path = self._paths[wd]
src_path = os.path.normpath(os.path.join(wd_path, name))
inotify_event = InotifyEvent(wd, mask, cookie, src_path)
_LOGGER.debug('Received event %r', inotify_event)
if inotify_event.mask & IN_IGNORED:
# Clean up deleted watches
del self._paths[wd]
event_list.append(inotify_event)
return event_list
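

# --- Illustrative usage sketch (an assumption, not part of this module) ---
# Linux only: watches a hypothetical directory for create/delete events and
# prints whatever arrives. read_events() blocks until the kernel delivers at
# least one event, so this is meant to be interrupted manually.
if __name__ == '__main__':
    watcher = Inotify(0)
    watcher.add_watch('/tmp', IN_CREATE | IN_DELETE)
    try:
        for event in watcher.read_events():
            print('%r' % (event,))
    finally:
        watcher.close()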
|
eblade/pyroman2
|
refs/heads/master
|
pyroman/pdf/object.py
|
1
|
class Object:
def __init__(self, id=0):
self.object_identifier = id
self._content = []
def __str__(self):
return ''.join([x.__str__() for x in self._content])
def put(self, obj):
self._content.append(obj)
@property
def obj(self):
if self.object_identifier == 0:
raise TypeError("Indirect object must be given an object identifiier")
return "%i 0 obj\n%s\nendobj\n" % (self.object_identifier, self.inline)
@property
def inline(self):
if self.object_identifier == 0:
raise TypeError("Indirect object must be given an object identifiier")
return self.__str__()
@property
def objects(self):
return ''.join([x.obj for x in self._content])
@property
def reference(self):
if self.object_identifier == 0:
raise TypeError("Indirect object must be given an object identifiier")
return "%i 0 R" % self.object_identifier
def __len__(self):
return sum([len(x) for x in self._content])
@property
def count(self):
return len(self._content)
class Boolean(Object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = True
def __str__(self):
return 'true' if self._content else 'false'
def put(self, value):
self._content = bool(value)
class Integer(Object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = 0
def __str__(self):
return str(self._content)
def put(self, value):
self._content = int(value)
class Real(Object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = 0.0
def __str__(self):
return str(self._content)
def put(self, value):
self._content = float(value)
class String(Object):
def __init__(self, value, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = value
def __str__(self):
return "(%s)" % super().__str__().replace(')', '\)').replace('(', '\(')
class HexadecimalString(Object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = ''
def __str__(self):
return "<%s>\n" % self._content
def put(self, value):
self._content = str(value)
class Name(HexadecimalString):
def __init__(self, value, *args, **kwargs):
super().__init__(*args, **kwargs)
self._content = value
def __str__(self):
return "/%s" % self._content
class Array(Object):
def __str__(self):
return "[ %s ]" % '\n'.join([x.__str__() for x in self._content])
class Dictionary(Object):
def put(self, key, value):
self._content.append((key, value))
def __str__(self):
return "<< %s >>" % '\n '.join(["%s %s" % (x[0].__str__(),
x[1].__str__()) for x in self._content])
class Null:
def __str__(self):
return "null"
class Rectangle(Object):
def __init__(self, llx, lly, urx, ury, *args, **kwargs):
super().__init__(*args, **kwargs)
self.llx = llx
self.lly = lly
self.urx = urx
self.ury = ury
def __str__(self):
return "[%s %s %s %s]" % (
str(self.llx),
str(self.lly),
str(self.urx),
str(self.ury)
)
def marginize(self, top, bottom, left, right):
return Rectangle(self.llx+left, self.lly+bottom, self.urx-right, self.ury-top)
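

# --- Illustrative usage sketch (an assumption, not part of this module) ---
# Builds a tiny indirect page dictionary to show how the wrappers compose;
# the keys, values and object id below are hypothetical.
if __name__ == '__main__':
    page = Dictionary(id=1)
    page.put(Name('Type'), Name('Page'))
    page.put(Name('MediaBox'), Rectangle(0, 0, 595, 842))
    print(page.obj)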
|
daichi-yoshikawa/dnn
|
refs/heads/master
|
dnnet/layers/affine.py
|
1
|
# Authors: Daichi Yoshikawa <daichi.yoshikawa@gmail.com>
# License: BSD 3 clause
from dnnet.layers.layer import Layer
from dnnet.ext_mathlibs import cp, np
from dnnet.training.weight_initialization import DefaultInitialization
from dnnet.utils.nn_utils import is_multi_channels_image
from dnnet.utils.nn_utils import prod, asnumpy, flatten, unflatten
class AffineLayer(Layer):
"""Implement affine transform of matrix.
Derived class of Layer.
Parameters
----------
w : np.array
Weight in 2d array.
dw : np.array
Gradient of weight in 2d array.
x : np.array
Extended parent layer's output in 2d array.
This consists of original parent layer's output and bias term.
"""
def __init__(self, output_shape, weight_initialization=DefaultInitialization()):
self.output_shape = output_shape
self.weight_initialization = weight_initialization
self.x = None
self.multi_channels_image = False
def set_dtype(self, dtype):
self.dtype = dtype
def get_type(self):
return 'affine'
def set_parent(self, parent):
Layer.set_parent(self, parent)
w_rows = prod(self.input_shape)
w_cols = prod(self.output_shape)
self.w = self.weight_initialization.get(w_rows, w_cols, self)
self.w = np.r_[np.zeros((1, w_cols)), self.w]
self.w = self.w.astype(self.dtype)
self.dw = np.zeros_like(self.w, dtype=self.w.dtype)
def has_weight(self):
return True
def forward(self, x):
self.__forward(x)
self.child.forward(self.fire)
def backward(self, dy):
self.__backward(dy)
self.parent.backward(self.backfire)
def predict(self, x):
self.__forward(x)
return self.child.predict(self.fire)
def __forward(self, x):
x = cp.array(x)
if is_multi_channels_image(self.input_shape):
x = flatten(x, self.input_shape)
# Add bias terms.
x = cp.c_[cp.ones((x.shape[0], 1), dtype=self.dtype), x]
fire = cp.dot(x, cp.array(self.w))
if is_multi_channels_image(self.output_shape):
fire = unflatten(fire, self.output_shape)
self.x = asnumpy(x)
self.fire = asnumpy(fire)
def __backward(self, dy):
dy = cp.array(dy)
if is_multi_channels_image(self.output_shape):
dy = flatten(dy, self.output_shape)
batch_size = self.x.shape[0]
self.dw = asnumpy(self.dtype(1.) / batch_size * cp.dot(cp.array(self.x).T, dy))
backfire = cp.dot(dy, cp.array(self.w[1:, :]).T)
if is_multi_channels_image(self.input_shape):
backfire = unflatten(backfire, self.input_shape)
self.backfire = asnumpy(backfire)
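

# --- Illustrative sketch (an assumption, not part of this module) ---
# Mirrors the core of __forward for a plain 2d batch: a column of ones is
# prepended so the first row of w acts as the bias term. The shapes below
# (batch of 4, 3 inputs, 2 outputs) are hypothetical.
if __name__ == '__main__':
    x = np.ones((4, 3))
    w = np.zeros((1 + 3, 2))  # first row holds the bias
    x_ext = np.c_[np.ones((x.shape[0], 1)), x]
    fire = np.dot(x_ext, w)
    assert fire.shape == (4, 2)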
|
tomtor/QGIS
|
refs/heads/master
|
python/plugins/processing/modeler/ModelerParametersDialog.py
|
23
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerParametersDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import webbrowser
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import (QDialog, QDialogButtonBox, QLabel, QLineEdit,
QFrame, QPushButton, QSizePolicy, QVBoxLayout,
QHBoxLayout, QWidget, QTabWidget, QTextEdit)
from qgis.PyQt.QtGui import QColor
from qgis.core import (Qgis,
QgsProject,
QgsProcessingParameterDefinition,
QgsProcessingModelOutput,
QgsProcessingModelChildAlgorithm,
QgsProcessingModelChildParameterSource,
QgsProcessingOutputDefinition)
from qgis.gui import (QgsGui,
QgsMessageBar,
QgsScrollArea,
QgsFilterLineEdit,
QgsHelp,
QgsProcessingContextGenerator,
QgsProcessingModelerParameterWidget,
QgsProcessingParameterWidgetContext,
QgsPanelWidget,
QgsPanelWidgetStack,
QgsColorButton,
QgsModelChildDependenciesWidget)
from qgis.utils import iface
from processing.gui.wrappers import WidgetWrapperFactory
from processing.gui.wrappers import InvalidParameterValue
from processing.tools.dataobjects import createContext
from processing.gui.wrappers import WidgetWrapper
class ModelerParametersDialog(QDialog):
def __init__(self, alg, model, algName=None, configuration=None):
super().__init__()
self.setObjectName('ModelerParametersDialog')
self.setModal(True)
if iface is not None:
self.setStyleSheet(iface.mainWindow().styleSheet())
        # dammit this is SUCH a mess... stupid stable API
self._alg = alg # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
self.model = model # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
self.childId = algName # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
self.configuration = configuration
self.context = createContext()
self.setWindowTitle(self._alg.displayName())
self.widget = ModelerParametersWidget(alg, model, algName, configuration, context=self.context, dialog=self)
QgsGui.enableAutoGeometryRestore(self)
self.buttonBox = QDialogButtonBox()
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok | QDialogButtonBox.Help)
self.buttonBox.accepted.connect(self.okPressed)
self.buttonBox.rejected.connect(self.reject)
self.buttonBox.helpRequested.connect(self.openHelp)
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.widget, 1)
mainLayout.addWidget(self.buttonBox)
self.setLayout(mainLayout)
def algorithm(self):
return self._alg
def setComments(self, text):
self.widget.setComments(text)
def comments(self):
return self.widget.comments()
def setCommentColor(self, color):
self.widget.setCommentColor(color)
def commentColor(self):
return self.widget.commentColor()
def switchToCommentTab(self):
self.widget.switchToCommentTab()
def getAvailableValuesOfType(self, paramType, outTypes=[], dataTypes=[]):
# upgrade paramType to list
if paramType is None:
paramType = []
elif not isinstance(paramType, (tuple, list)):
paramType = [paramType]
if outTypes is None:
outTypes = []
elif not isinstance(outTypes, (tuple, list)):
outTypes = [outTypes]
return self.model.availableSourcesForChild(self.childId, [p.typeName() for p in paramType if
issubclass(p, QgsProcessingParameterDefinition)],
[o.typeName() for o in outTypes if
issubclass(o, QgsProcessingOutputDefinition)], dataTypes)
def resolveValueDescription(self, value):
if isinstance(value, QgsProcessingModelChildParameterSource):
if value.source() == QgsProcessingModelChildParameterSource.StaticValue:
return value.staticValue()
elif value.source() == QgsProcessingModelChildParameterSource.ModelParameter:
return self.model.parameterDefinition(value.parameterName()).description()
elif value.source() == QgsProcessingModelChildParameterSource.ChildOutput:
alg = self.model.childAlgorithm(value.outputChildId())
output_name = alg.algorithm().outputDefinition(value.outputName()).description()
# see if this output has been named by the model designer -- if so, we use that friendly name
for name, output in alg.modelOutputs().items():
if output.childOutputName() == value.outputName():
output_name = name
break
return self.tr("'{0}' from algorithm '{1}'").format(output_name, alg.description())
return value
def setPreviousValues(self):
self.widget.setPreviousValues()
def createAlgorithm(self):
return self.widget.createAlgorithm()
def okPressed(self):
if self.createAlgorithm() is not None:
self.accept()
def openHelp(self):
algHelp = self.widget.algorithm().helpUrl()
if not algHelp:
algHelp = QgsHelp.helpUrl("processing_algs/{}/{}.html#{}".format(
self.widget.algorithm().provider().helpId(), self.algorithm().groupId(),
"{}{}".format(self.algorithm().provider().helpId(), self.algorithm().name()))).toString()
if algHelp not in [None, ""]:
webbrowser.open(algHelp)
class ModelerParametersPanelWidget(QgsPanelWidget):
def __init__(self, alg, model, algName=None, configuration=None, dialog=None, context=None):
super().__init__()
self._alg = alg # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
self.model = model # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
self.childId = algName # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
self.configuration = configuration
self.context = context
self.dialog = dialog
self.widget_labels = {}
class ContextGenerator(QgsProcessingContextGenerator):
def __init__(self, context):
super().__init__()
self.processing_context = context
def processingContext(self):
return self.processing_context
self.context_generator = ContextGenerator(self.context)
self.setupUi()
self.params = None
def algorithm(self):
return self._alg
def setupUi(self):
self.showAdvanced = False
self.wrappers = {}
self.algorithmItem = None
self.mainLayout = QVBoxLayout()
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout = QVBoxLayout()
self.bar = QgsMessageBar()
self.bar.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
self.verticalLayout.addWidget(self.bar)
hLayout = QHBoxLayout()
hLayout.setContentsMargins(0, 0, 0, 0)
descriptionLabel = QLabel(self.tr("Description"))
self.descriptionBox = QLineEdit()
self.descriptionBox.setText(self._alg.displayName())
hLayout.addWidget(descriptionLabel)
hLayout.addWidget(self.descriptionBox)
self.verticalLayout.addLayout(hLayout)
line = QFrame()
line.setFrameShape(QFrame.HLine)
line.setFrameShadow(QFrame.Sunken)
self.verticalLayout.addWidget(line)
widget_context = QgsProcessingParameterWidgetContext()
widget_context.setProject(QgsProject.instance())
if iface is not None:
widget_context.setMapCanvas(iface.mapCanvas())
widget_context.setActiveLayer(iface.activeLayer())
widget_context.setModel(self.model)
widget_context.setModelChildAlgorithmId(self.childId)
self.algorithmItem = QgsGui.instance().processingGuiRegistry().algorithmConfigurationWidget(self._alg)
if self.algorithmItem:
self.algorithmItem.setWidgetContext(widget_context)
self.algorithmItem.registerProcessingContextGenerator(self.context_generator)
if self.configuration:
self.algorithmItem.setConfiguration(self.configuration)
self.verticalLayout.addWidget(self.algorithmItem)
for param in self._alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
self.advancedButton = QPushButton()
self.advancedButton.setText(self.tr('Show advanced parameters'))
self.advancedButton.clicked.connect(
self.showAdvancedParametersClicked)
advancedButtonHLayout = QHBoxLayout()
advancedButtonHLayout.addWidget(self.advancedButton)
advancedButtonHLayout.addStretch()
self.verticalLayout.addLayout(advancedButtonHLayout)
break
for param in self._alg.parameterDefinitions():
if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
wrapper = WidgetWrapperFactory.create_wrapper(param, self.dialog)
self.wrappers[param.name()] = wrapper
wrapper.setWidgetContext(widget_context)
wrapper.registerProcessingContextGenerator(self.context_generator)
if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
widget = wrapper
else:
widget = wrapper.widget
if widget is not None:
if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
label = wrapper.createLabel()
else:
tooltip = param.description()
widget.setToolTip(tooltip)
label = wrapper.label
self.widget_labels[param.name()] = label
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
label.setVisible(self.showAdvanced)
widget.setVisible(self.showAdvanced)
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(widget)
for output in self._alg.destinationParameterDefinitions():
if output.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
widget = QgsGui.processingGuiRegistry().createModelerParameterWidget(self.model,
self.childId,
output,
self.context)
widget.setDialog(self.dialog)
widget.setWidgetContext(widget_context)
widget.registerProcessingContextGenerator(self.context_generator)
self.wrappers[output.name()] = widget
item = QgsFilterLineEdit()
if hasattr(item, 'setPlaceholderText'):
item.setPlaceholderText(self.tr('[Enter name if this is a final result]'))
label = widget.createLabel()
if label is not None:
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(widget)
label = QLabel(' ')
self.verticalLayout.addWidget(label)
label = QLabel(self.tr('Dependencies'))
self.dependencies_panel = QgsModelChildDependenciesWidget(self, self.model, self.childId)
self.verticalLayout.addWidget(label)
self.verticalLayout.addWidget(self.dependencies_panel)
self.verticalLayout.addStretch(1000)
self.setPreviousValues()
self.verticalLayout2 = QVBoxLayout()
self.verticalLayout2.setSpacing(2)
self.verticalLayout2.setMargin(0)
self.paramPanel = QWidget()
self.paramPanel.setLayout(self.verticalLayout)
self.scrollArea = QgsScrollArea()
self.scrollArea.setWidget(self.paramPanel)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setFrameStyle(QFrame.NoFrame)
self.verticalLayout2.addWidget(self.scrollArea)
w = QWidget()
w.setLayout(self.verticalLayout2)
self.mainLayout.addWidget(w)
self.setLayout(self.mainLayout)
def showAdvancedParametersClicked(self):
self.showAdvanced = not self.showAdvanced
if self.showAdvanced:
self.advancedButton.setText(self.tr('Hide advanced parameters'))
else:
self.advancedButton.setText(self.tr('Show advanced parameters'))
for param in self._alg.parameterDefinitions():
if param.flags() & QgsProcessingParameterDefinition.FlagAdvanced:
wrapper = self.wrappers[param.name()]
if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
wrapper.setVisible(self.showAdvanced)
else:
wrapper.widget.setVisible(self.showAdvanced)
self.widget_labels[param.name()].setVisible(self.showAdvanced)
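# Populate the panel widgets from the existing child algorithm when one is
# being edited: restores the description, parameter sources, destination /
# model output settings and the dependencies.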
def setPreviousValues(self):
if self.childId is not None:
alg = self.model.childAlgorithm(self.childId)
self.descriptionBox.setText(alg.description())
for param in alg.algorithm().parameterDefinitions():
if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
value = None
if param.name() in alg.parameterSources():
value = alg.parameterSources()[param.name()]
if isinstance(value, list) and len(value) == 1:
value = value[0]
elif isinstance(value, list) and len(value) == 0:
value = None
wrapper = self.wrappers[param.name()]
if issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
if value is None:
value = QgsProcessingModelChildParameterSource.fromStaticValue(param.defaultValue())
wrapper.setWidgetValue(value)
else:
if value is None:
value = param.defaultValue()
if isinstance(value,
QgsProcessingModelChildParameterSource) and value.source() == QgsProcessingModelChildParameterSource.StaticValue:
value = value.staticValue()
wrapper.setValue(value)
for output in self.algorithm().destinationParameterDefinitions():
if output.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
model_output_name = None
for name, out in alg.modelOutputs().items():
if out.childId() == self.childId and out.childOutputName() == output.name():
# this destination parameter is linked to a model output
model_output_name = out.name()
break
value = None
if model_output_name is None and output.name() in alg.parameterSources():
value = alg.parameterSources()[output.name()]
if isinstance(value, list) and len(value) == 1:
value = value[0]
elif isinstance(value, list) and len(value) == 0:
value = None
wrapper = self.wrappers[output.name()]
if model_output_name is not None:
wrapper.setToModelOutput(model_output_name)
elif value is not None or output.defaultValue() is not None:
if value is None:
value = QgsProcessingModelChildParameterSource.fromStaticValue(output.defaultValue())
wrapper.setWidgetValue(value)
self.dependencies_panel.setValue(alg.dependencies())
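# Build a QgsProcessingModelChildAlgorithm from the current widget state:
# parameter sources are collected from the wrappers, static values that fail
# validation are skipped, and destination parameters may be registered as
# model outputs.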
def createAlgorithm(self):
alg = QgsProcessingModelChildAlgorithm(self._alg.id())
if not self.childId:
alg.generateChildId(self.model)
else:
alg.setChildId(self.childId)
alg.setDescription(self.descriptionBox.text())
if self.algorithmItem:
alg.setConfiguration(self.algorithmItem.configuration())
self._alg = alg.algorithm().create(self.algorithmItem.configuration())
for param in self._alg.parameterDefinitions():
if param.isDestination() or param.flags() & QgsProcessingParameterDefinition.FlagHidden:
continue
try:
wrapper = self.wrappers[param.name()]
if issubclass(wrapper.__class__, WidgetWrapper):
val = wrapper.value()
elif issubclass(wrapper.__class__, QgsProcessingModelerParameterWidget):
val = wrapper.value()
else:
val = wrapper.parameterValue()
except InvalidParameterValue:
val = None
if isinstance(val, QgsProcessingModelChildParameterSource):
val = [val]
elif not (isinstance(val, list) and all(
[isinstance(subval, QgsProcessingModelChildParameterSource) for subval in val])):
val = [QgsProcessingModelChildParameterSource.fromStaticValue(val)]
valid = True
for subval in val:
if (isinstance(subval, QgsProcessingModelChildParameterSource)
and subval.source() == QgsProcessingModelChildParameterSource.StaticValue
and not param.checkValueIsAcceptable(subval.staticValue())) \
or (subval is None and not param.flags() & QgsProcessingParameterDefinition.FlagOptional):
valid = False
break
if valid:
alg.addParameterSources(param.name(), val)
outputs = {}
for output in self._alg.destinationParameterDefinitions():
if not output.flags() & QgsProcessingParameterDefinition.FlagHidden:
wrapper = self.wrappers[output.name()]
if wrapper.isModelOutput():
name = wrapper.modelOutputName()
if name:
model_output = QgsProcessingModelOutput(name, name)
model_output.setChildId(alg.childId())
model_output.setChildOutputName(output.name())
outputs[name] = model_output
else:
val = wrapper.value()
if isinstance(val, QgsProcessingModelChildParameterSource):
val = [val]
alg.addParameterSources(output.name(), val)
if output.flags() & QgsProcessingParameterDefinition.FlagIsModelOutput:
if output.name() not in outputs:
model_output = QgsProcessingModelOutput(output.name(), output.name())
model_output.setChildId(alg.childId())
model_output.setChildOutputName(output.name())
outputs[output.name()] = model_output
alg.setModelOutputs(outputs)
alg.setDependencies(self.dependencies_panel.value())
return alg
class ModelerParametersWidget(QWidget):
def __init__(self, alg, model, algName=None, configuration=None, dialog=None, context=None):
super().__init__()
self._alg = alg # The algorithm to define in this dialog. It is an instance of QgsProcessingAlgorithm
self.model = model # The model this algorithm is going to be added to. It is an instance of QgsProcessingModelAlgorithm
self.childId = algName # The name of the algorithm in the model, in case we are editing it and not defining it for the first time
self.configuration = configuration
self.context = context
self.dialog = dialog
self.widget = ModelerParametersPanelWidget(alg, model, algName, configuration, dialog, context)
class ContextGenerator(QgsProcessingContextGenerator):
def __init__(self, context):
super().__init__()
self.processing_context = context
def processingContext(self):
return self.processing_context
self.context_generator = ContextGenerator(self.context)
self.setupUi()
self.params = None
def algorithm(self):
return self._alg
def switchToCommentTab(self):
self.tab.setCurrentIndex(1)
self.commentEdit.setFocus()
self.commentEdit.selectAll()
def setupUi(self):
self.mainLayout = QVBoxLayout()
self.mainLayout.setContentsMargins(0, 0, 0, 0)
self.tab = QTabWidget()
self.mainLayout.addWidget(self.tab)
self.param_widget = QgsPanelWidgetStack()
self.widget.setDockMode(True)
self.param_widget.setMainPanel(self.widget)
self.tab.addTab(self.param_widget, self.tr('Properties'))
self.commentLayout = QVBoxLayout()
self.commentEdit = QTextEdit()
self.commentEdit.setAcceptRichText(False)
self.commentLayout.addWidget(self.commentEdit, 1)
hl = QHBoxLayout()
hl.setContentsMargins(0, 0, 0, 0)
hl.addWidget(QLabel(self.tr('Color')))
self.comment_color_button = QgsColorButton()
self.comment_color_button.setAllowOpacity(True)
self.comment_color_button.setWindowTitle(self.tr('Comment Color'))
self.comment_color_button.setShowNull(True, self.tr('Default'))
hl.addWidget(self.comment_color_button)
self.commentLayout.addLayout(hl)
w2 = QWidget()
w2.setLayout(self.commentLayout)
self.tab.addTab(w2, self.tr('Comments'))
self.setLayout(self.mainLayout)
def setComments(self, text):
self.commentEdit.setPlainText(text)
def comments(self):
return self.commentEdit.toPlainText()
def setCommentColor(self, color):
if color.isValid():
self.comment_color_button.setColor(color)
else:
self.comment_color_button.setToNull()
def commentColor(self):
return self.comment_color_button.color() if not self.comment_color_button.isNull() else QColor()
def setPreviousValues(self):
self.widget.setPreviousValues()
def createAlgorithm(self):
alg = self.widget.createAlgorithm()
if alg:
alg.comment().setDescription(self.comments())
alg.comment().setColor(self.commentColor())
return alg
|
endocode/linux
|
refs/heads/master
|
tools/perf/scripts/python/export-to-postgresql.py
|
293
|
# export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
# To use this script you will need to have installed the python-pyside package,
# which provides LGPL-licensed Python bindings for Qt. You will also need the
# libqt4-sql-psql package for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# ubuntu:
#
# $ sudo apt-get install postgresql python-pyside.qtsql libqt4-sql-psql
# $ sudo su - postgres
# $ createuser -s <your user id here>
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# call-graph-from-postgresql.py. Refer to that script for details.
#
# Tables:
#
# The tables largely correspond to perf tools' data structures and are mostly self-explanatory.
#
# samples
#
# 'samples' is the main table. It represents what instruction was executing at a point in time
# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
#
# calls
#
# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
# 'calls' is only created when the 'calls' option to this script is specified.
#
# call_paths
#
# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
# 'call_paths' is only created when the 'calls' option to this script is specified.
#
# branch_types
#
# 'branch_types' provides descriptions for each type of branch.
#
# comm_threads
#
# 'comm_threads' shows how 'comms' relates to 'threads'.
#
# comms
#
# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
#
# dsos
#
# 'dsos' contains a record for each executable file or library.
#
# machines
#
# 'machines' can be used to distinguish virtual machines if virtualization is supported.
#
# selected_events
#
# 'selected_events' contains a record for each kind of event that has been sampled.
#
# symbols
#
# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
#
# threads
#
# 'threads' contains a record for each thread.
#
# Views:
#
# Most of the tables have views for more friendly display. The views are:
#
# calls_view
# call_paths_view
# comm_threads_view
# dsos_view
# machines_view
# samples_view
# symbols_view
# threads_view
#
# More examples of browsing the database with psql:
# Note that some of the examples are not the most optimal SQL queries.
# Note that call information is only available if the script's 'calls' option has been used.
#
# Top 10 function calls (not aggregated by symbol):
#
# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
#
# Top 10 function calls (aggregated by symbol):
#
# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
#
# Note that the branch count gives a rough estimation of cpu usage, so functions
# that took a long time but have a relatively low branch count must have spent time
# waiting.
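#
# Another example: show the 10 threads with the most samples (columns as
# defined in samples_view):
#
# SELECT tid,command,COUNT(*) AS sample_count FROM samples_view
# GROUP BY tid,command ORDER BY sample_count DESC LIMIT 10;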
#
# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
#
# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
#
# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
#
# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
#
# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
#
# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
#
# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670):
#
# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
#
# Show transactions:
#
# SELECT * FROM samples_view WHERE event = 'transactions';
#
# Note that transaction start has 'in_tx' true whereas transaction end has 'in_tx' false.
# Transaction aborts have branch_type_name 'transaction abort'.
#
# Show transaction aborts:
#
# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
#
# Printing a call stack requires walking the call_paths table. For example, this python script:
# #!/usr/bin/python2
#
# import sys
# from PySide.QtSql import *
#
# if __name__ == '__main__':
# if (len(sys.argv) < 3):
# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
# raise Exception("Too few arguments")
# dbname = sys.argv[1]
# call_path_id = sys.argv[2]
# db = QSqlDatabase.addDatabase('QPSQL')
# db.setDatabaseName(dbname)
# if not db.open():
# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
# query = QSqlQuery(db)
# print " id ip symbol_id symbol dso_id dso_short_name"
# while call_path_id != 0 and call_path_id != 1:
# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
# if not ret:
# raise Exception("Query failed: " + query.lastError().text())
# if not query.next():
# raise Exception("Query failed")
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
# call_path_id = query.value(6)
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
perf_db_export_callchains = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>] [<callchains>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls and call_paths table"
print >> sys.stderr, " callchains 'callchains' => create call_paths table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
for i in range(3,len(sys.argv)):
if (sys.argv[i] == "calls"):
perf_db_export_calls = True
elif (sys.argv[i] == "callchains"):
perf_db_export_callchains = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean,'
'call_path_id bigint)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
'to_hex(c.ip) AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
'to_hex(p.ip) AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
'to_hex(ip) AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'call_id,'
'return_id,'
'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
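# The intermediate files use the PostgreSQL binary COPY format: an 11-byte
# signature followed by 32-bit flags and header-extension fields, then one
# record per row (a 16-bit field count and, for each field, a 32-bit length
# plus the raw value), terminated by a 16-bit -1 ("\377\377") trailer.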
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
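# A PQresultStatus of 4 is PGRES_COPY_IN, i.e. the server is ready to receive the COPY data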
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls or perf_db_export_callchains:
call_path_file = open_output_file("call_path_table.bin")
if perf_db_export_calls:
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls or perf_db_export_callchains:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls or perf_db_export_callchains:
copy_output_file(call_path_file, "call_paths")
if perf_db_export_calls:
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls or perf_db_export_callchains:
remove_output_file(call_path_file)
if perf_db_export_calls:
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls or perf_db_export_callchains:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, call_path_id, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiBiq", 18, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx, 8, call_path_id)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiBiq", 22, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx, 8, call_path_id)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
|
cts2/rf2service
|
refs/heads/master
|
server/Concept.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
from server.BaseNode import expose
from server.RF2BaseNode import RF2BaseNode, global_iter_parms, validate
from rf2db.utils.sctid import sctid
from rf2db.db.RF2ConceptFile import ConceptDB, concept_parms, concept_list_parms, new_concept_parms, \
update_concept_parms, delete_concept_parms
from server.config.Rf2Entries import settings
concdb = ConceptDB()
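# REST handlers for RF2 concepts: GET reads a concept by SCTID, POST creates a
# new one, PUT updates and DELETE removes an existing concept; the Concepts
# node below lists concepts.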
class Concept(RF2BaseNode):
title = "Read RF2 concept by concept id"
label = "Concept SCTID"
value = settings.refConcept
@expose
@validate(concept_parms)
def default(self, parms, **kwargs):
dbrec = concdb.read(int(sctid(parms.concept)), **parms.dict)
return dbrec, (404, "Concept %s not found" % parms.concept)
@expose("POST")
@validate(new_concept_parms)
def new(self, parms, **kwargs):
# A POST cannot supply a concept id
kwargs.pop('concept', None)
dbrec = concdb.add(**parms.dict)
if isinstance(dbrec, str):
return None, (400, dbrec)
elif not dbrec:
return None, (500, "Unable to create concept record")
self.redirect('/concept/%s' % dbrec.id)
@expose(methods="PUT")
@validate(update_concept_parms)
def update(self, parms, concept, **_):
return concdb.update(concept, **parms.dict)
@expose(methods=["DELETE"])
@validate(delete_concept_parms)
def delete(self, parms, concept, **_):
return concdb.delete(concept, **parms.dict)
class Concepts(RF2BaseNode):
title = "List concepts starting after"
label = "Concept SCTID"
value = 0
extensions = RF2BaseNode.extensions + [global_iter_parms]
@expose
@validate(concept_list_parms)
def default(self, parms, **_):
return concdb.as_list(concdb.getAllConcepts(**parms.dict), parms)
|
Spiderlover/Toontown
|
refs/heads/master
|
toontown/coghq/CogHQExterior.py
|
2
|
from direct.directnotify import DirectNotifyGlobal
from toontown.battle import BattlePlace
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from otp.distributed.TelemetryLimiter import RotationLimitToH, TLGatherAllAvs
from toontown.nametag import NametagGlobals
class CogHQExterior(BattlePlace.BattlePlace):
notify = DirectNotifyGlobal.directNotify.newCategory('CogHQExterior')
def __init__(self, loader, parentFSM, doneEvent):
BattlePlace.BattlePlace.__init__(self, loader, doneEvent)
self.parentFSM = parentFSM
self.fsm = ClassicFSM.ClassicFSM('CogHQExterior', [State.State('start', self.enterStart, self.exitStart, ['walk',
'tunnelIn',
'teleportIn',
'doorIn']),
State.State('walk', self.enterWalk, self.exitWalk, ['stickerBook',
'teleportOut',
'tunnelOut',
'DFA',
'doorOut',
'died',
'stopped',
'WaitForBattle',
'battle',
'squished',
'stopped']),
State.State('stopped', self.enterStopped, self.exitStopped, ['walk', 'teleportOut', 'stickerBook']),
State.State('doorIn', self.enterDoorIn, self.exitDoorIn, ['walk', 'stopped']),
State.State('doorOut', self.enterDoorOut, self.exitDoorOut, ['walk', 'stopped']),
State.State('stickerBook', self.enterStickerBook, self.exitStickerBook, ['walk',
'DFA',
'WaitForBattle',
'battle',
'tunnelOut',
'doorOut',
'squished',
'died']),
State.State('WaitForBattle', self.enterWaitForBattle, self.exitWaitForBattle, ['battle', 'walk']),
State.State('battle', self.enterBattle, self.exitBattle, ['walk', 'teleportOut', 'died']),
State.State('DFA', self.enterDFA, self.exitDFA, ['DFAReject', 'teleportOut', 'tunnelOut']),
State.State('DFAReject', self.enterDFAReject, self.exitDFAReject, ['walk']),
State.State('squished', self.enterSquished, self.exitSquished, ['walk', 'died', 'teleportOut']),
State.State('teleportIn', self.enterTeleportIn, self.exitTeleportIn, ['walk', 'WaitForBattle', 'battle']),
State.State('teleportOut', self.enterTeleportOut, self.exitTeleportOut, ['teleportIn', 'final', 'WaitForBattle']),
State.State('died', self.enterDied, self.exitDied, ['quietZone']),
State.State('tunnelIn', self.enterTunnelIn, self.exitTunnelIn, ['walk', 'WaitForBattle', 'battle']),
State.State('tunnelOut', self.enterTunnelOut, self.exitTunnelOut, ['final']),
State.State('final', self.enterFinal, self.exitFinal, ['start'])], 'start', 'final')
def load(self):
self.parentFSM.getStateNamed('cogHQExterior').addChild(self.fsm)
BattlePlace.BattlePlace.load(self)
def unload(self):
self.parentFSM.getStateNamed('cogHQExterior').removeChild(self.fsm)
del self.fsm
BattlePlace.BattlePlace.unload(self)
def enter(self, requestStatus):
self.zoneId = requestStatus['zoneId']
BattlePlace.BattlePlace.enter(self)
self.fsm.enterInitialState()
base.playMusic(self.loader.music, looping=1, volume=0.8)
self.loader.geom.reparentTo(render)
self.nodeList = [self.loader.geom]
self._telemLimiter = TLGatherAllAvs('CogHQExterior', RotationLimitToH)
self.accept('doorDoneEvent', self.handleDoorDoneEvent)
self.accept('DistributedDoor_doorTrigger', self.handleDoorTrigger)
NametagGlobals.setWant2dNametags(True)
self.tunnelOriginList = base.cr.hoodMgr.addLinkTunnelHooks(self, self.nodeList, self.zoneId)
how = requestStatus['how']
self.fsm.request(how, [requestStatus])
def exit(self):
self.fsm.requestFinalState()
self._telemLimiter.destroy()
del self._telemLimiter
self.loader.music.stop()
for node in self.tunnelOriginList:
node.removeNode()
del self.tunnelOriginList
if self.loader.geom:
self.loader.geom.reparentTo(hidden)
self.ignoreAll()
BattlePlace.BattlePlace.exit(self)
def enterTunnelOut(self, requestStatus):
fromZoneId = self.zoneId - self.zoneId % 100
tunnelName = base.cr.hoodMgr.makeLinkTunnelName(self.loader.hood.id, fromZoneId)
requestStatus['tunnelName'] = tunnelName
BattlePlace.BattlePlace.enterTunnelOut(self, requestStatus)
def enterTeleportIn(self, requestStatus):
x, y, z, h, p, r = base.cr.hoodMgr.getPlaygroundCenterFromId(self.loader.hood.id)
base.localAvatar.setPosHpr(render, x, y, z, h, p, r)
BattlePlace.BattlePlace.enterTeleportIn(self, requestStatus)
def enterTeleportOut(self, requestStatus, callback = None):
if 'battle' in requestStatus:
self.__teleportOutDone(requestStatus)
else:
BattlePlace.BattlePlace.enterTeleportOut(self, requestStatus, self.__teleportOutDone)
def __teleportOutDone(self, requestStatus):
hoodId = requestStatus['hoodId']
zoneId = requestStatus['zoneId']
avId = requestStatus['avId']
shardId = requestStatus['shardId']
if hoodId == self.loader.hood.hoodId and zoneId == self.loader.hood.hoodId and shardId == None:
self.fsm.request('teleportIn', [requestStatus])
elif hoodId == ToontownGlobals.MyEstate:
self.getEstateZoneAndGoHome(requestStatus)
else:
self.doneStatus = requestStatus
messenger.send(self.doneEvent)
return
def exitTeleportOut(self):
BattlePlace.BattlePlace.exitTeleportOut(self)
def enterSquished(self):
base.localAvatar.laffMeter.start()
base.localAvatar.b_setAnimState('Squish')
taskMgr.doMethodLater(2.0, self.handleSquishDone, base.localAvatar.uniqueName('finishSquishTask'))
def handleSquishDone(self, extraArgs = []):
base.cr.playGame.getPlace().setState('walk')
def exitSquished(self):
taskMgr.remove(base.localAvatar.uniqueName('finishSquishTask'))
base.localAvatar.laffMeter.stop()
|
yanheven/ceilometer
|
refs/heads/master
|
ceilometer/ipmi/pollsters/node.py
|
1
|
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_utils import timeutils
import six
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer.ipmi.platform import exception as nmexcept
from ceilometer.ipmi.platform import intel_node_manager as node_manager
from ceilometer.openstack.common import log
from ceilometer import sample
CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')
CONF.import_opt('polling_retry', 'ceilometer.ipmi.pollsters',
group='ipmi')
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class _Base(plugin_base.PollsterBase):
def setup_environment(self):
super(_Base, self).setup_environment()
self.nodemanager = node_manager.NodeManager()
self.polling_failures = 0
# Do not load this extension if no NM support
if self.nodemanager.nm_version == 0:
raise plugin_base.ExtensionLoadError()
@property
def default_discovery(self):
return 'local_node'
def get_value(self, stats):
"""Get value from statistics."""
return node_manager._hex(stats["Current_value"])
@abc.abstractmethod
def read_data(self, cache):
"""Return data sample for IPMI."""
def get_samples(self, manager, cache, resources):
# Only one resource for Node Manager pollster
try:
stats = self.read_data(cache)
except nmexcept.IPMIException:
self.polling_failures += 1
LOG.warning(_('Polling %(name)s failed for %(cnt)s times!')
% ({'name': self.NAME,
'cnt': self.polling_failures}))
if (CONF.ipmi.polling_retry >= 0 and
self.polling_failures > CONF.ipmi.polling_retry):
LOG.warning(_('Pollster for %s is disabled!') % self.NAME)
raise plugin_base.PollsterPermanentError(resources[0])
else:
return
self.polling_failures = 0
metadata = {
'node': CONF.host
}
if stats:
data = self.get_value(stats)
yield sample.Sample(
name=self.NAME,
type=self.TYPE,
unit=self.UNIT,
volume=data,
user_id=None,
project_id=None,
resource_id=CONF.host,
timestamp=timeutils.utcnow().isoformat(),
resource_metadata=metadata)
class InletTemperaturePollster(_Base):
# Note(ildikov): The new meter name should be
# "hardware.ipmi.node.inlet_temperature". As currently there
# is no meter deprecation support in the code, we should use the
# old name in order to avoid confusion.
NAME = "hardware.ipmi.node.temperature"
TYPE = sample.TYPE_GAUGE
UNIT = "C"
def read_data(self, cache):
return self.nodemanager.read_inlet_temperature()
class OutletTemperaturePollster(_Base):
NAME = "hardware.ipmi.node.outlet_temperature"
TYPE = sample.TYPE_GAUGE
UNIT = "C"
def read_data(self, cache):
return self.nodemanager.read_outlet_temperature()
class PowerPollster(_Base):
NAME = "hardware.ipmi.node.power"
TYPE = sample.TYPE_GAUGE
UNIT = "W"
def read_data(self, cache):
return self.nodemanager.read_power_all()
class AirflowPollster(_Base):
NAME = "hardware.ipmi.node.airflow"
TYPE = sample.TYPE_GAUGE
UNIT = "CFM"
def read_data(self, cache):
return self.nodemanager.read_airflow()
class CUPSIndexPollster(_Base):
NAME = "hardware.ipmi.node.cups"
TYPE = sample.TYPE_GAUGE
UNIT = "CUPS"
def read_data(self, cache):
return self.nodemanager.read_cups_index()
def get_value(self, stats):
return node_manager._hex(stats["CUPS_Index"])
class _CUPSUtilPollsterBase(_Base):
CACHE_KEY_CUPS = 'CUPS'
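# The CUPS utilization statistics are read once per polling cycle and shared
# between the CPU, memory and IO pollsters through the cache.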
def read_data(self, cache):
i_cache = cache.setdefault(self.CACHE_KEY_CUPS, {})
if not i_cache:
i_cache.update(self.nodemanager.read_cups_utilization())
return i_cache
class CPUUtilPollster(_CUPSUtilPollsterBase):
NAME = "hardware.ipmi.node.cpu_util"
TYPE = sample.TYPE_GAUGE
UNIT = "%"
def get_value(self, stats):
return node_manager._hex(stats["CPU_Utilization"])
class MemUtilPollster(_CUPSUtilPollsterBase):
NAME = "hardware.ipmi.node.mem_util"
TYPE = sample.TYPE_GAUGE
UNIT = "%"
def get_value(self, stats):
return node_manager._hex(stats["Mem_Utilization"])
class IOUtilPollster(_CUPSUtilPollsterBase):
NAME = "hardware.ipmi.node.io_util"
TYPE = sample.TYPE_GAUGE
UNIT = "%"
def get_value(self, stats):
return node_manager._hex(stats["IO_Utilization"])
|
eemirtekin/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/features/courseware.py
|
177
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import django_url
@step('I visit the courseware URL$')
def i_visit_the_course_info_url(step):
url = django_url('/courses/MITx/6.002x/2012_Fall/courseware')
world.browser.visit(url)
|
2014cdag10/2014cdag10
|
refs/heads/master
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/webbrowser.py
|
735
|
from browser import window
__all__ = ["Error", "open", "open_new", "open_new_tab"]
class Error(Exception):
pass
_target = { 0: '', 1: '_blank', 2: '_new' } # hack...
def open(url, new=0, autoraise=True):
"""
Whether a new window or a new tab is opened is not controllable
on the client side. autoraise is not available.
"""
if window.open(url, _target[new]):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
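# Example usage from Brython code (illustrative):
#
# import webbrowser
# webbrowser.open_new_tab("https://www.python.org")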
|
matthaywardwebdesign/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-debug-info.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure debug info setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('debug-info.gyp', chdir=CHDIR)
test.build('debug-info.gyp', test.ALL, chdir=CHDIR)
suffix = '.exe.pdb' if test.format == 'ninja' else '.pdb'
test.built_file_must_not_exist('test_debug_off%s' % suffix, chdir=CHDIR)
test.built_file_must_exist('test_debug_on%s' % suffix, chdir=CHDIR)
test.pass_test()
|
stormi/tsunami
|
refs/heads/master
|
src/primaires/scripting/actions/deplacer.py
|
1
|
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action deplacer."""
from primaires.format.fonctions import supprimer_accents
from primaires.perso.exceptions.action import ExceptionAction
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Un ppersonnage se déplace vers une sortie indiquée.
Cette action demande à un personnage de se déplacer dans la direction
indiquée. Il est préférable de l'utiliser avec des tests ou conditions
pour s'assurer que le personnage est bien dans la salle choisie
avant de lui demander de se déplacer, sauf si c'est vraiment le but
recherché. En outre, cette action peut aussi bien agir sur les
PNJ que les joueurs, soyez prudent.
"""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.deplacer_personnage, "Personnage",
"str")
@staticmethod
def deplacer_personnage(personnage, sortie):
"""Déplace le personnage vers la sortie indiquée.
La sortie peut être donnée sous la forme de sa direction absolue
ou son nom renommé (les noms comme "escalier" sont autorisés).
"""
        # Get the exit from the given name or direction
try:
sortie = personnage.salle.sorties.get_sortie_par_nom_ou_direction(
supprimer_accents(sortie).lower())
assert sortie
except (KeyError, AssertionError):
raise ErreurExecution("la sortie {} est introuvable en {}".format(
sortie, personnage.salle))
try:
personnage.deplacer_vers(sortie.nom)
except ExceptionAction:
pass
|
mgit-at/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/pkg5_publisher.py
|
102
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <ansible@mavit.org.uk>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
  - This module configures which publishers a client will download IPS
packages from.
options:
name:
description:
- The publisher's name.
required: true
aliases: [ publisher ]
state:
description:
- Whether to ensure that a publisher is present or absent.
default: present
choices: [ present, absent ]
sticky:
description:
- Packages installed from a sticky repository can only receive updates
from that repository.
type: bool
enabled:
description:
- Is the repository enabled or disabled?
type: bool
origin:
description:
- A path or URL to the repository.
- Multiple values may be provided.
mirror:
description:
- A path or URL to the repository mirror.
- Multiple values may be provided.
'''
EXAMPLES = '''
# Fetch packages for the solaris publisher direct from Oracle:
- pkg5_publisher:
name: solaris
sticky: true
origin: https://pkg.oracle.com/solaris/support/
# Configure a publisher for locally-produced packages:
- pkg5_publisher:
name: site
origin: 'https://pkg.example.com/site/'
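# A hedged extra example (not in the original docs; it follows from this
# module's 'state' option): remove a previously configured publisher.
- pkg5_publisher:
    name: site
    state: absent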
'''
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, aliases=['publisher']),
state=dict(default='present', choices=['present', 'absent']),
sticky=dict(type='bool'),
enabled=dict(type='bool'),
# search_after=dict(),
# search_before=dict(),
origin=dict(type='list'),
mirror=dict(type='list'),
)
)
for option in ['origin', 'mirror']:
if module.params[option] == ['']:
module.params[option] = []
if module.params['state'] == 'present':
modify_publisher(module, module.params)
else:
unset_publisher(module, module.params['name'])
def modify_publisher(module, params):
name = params['name']
existing = get_publishers(module)
if name in existing:
for option in ['origin', 'mirror', 'sticky', 'enabled']:
if params[option] is not None:
if params[option] != existing[name][option]:
return set_publisher(module, params)
else:
return set_publisher(module, params)
module.exit_json()
def set_publisher(module, params):
name = params['name']
args = []
if params['origin'] is not None:
args.append('--remove-origin=*')
args.extend(['--add-origin=' + u for u in params['origin']])
if params['mirror'] is not None:
args.append('--remove-mirror=*')
args.extend(['--add-mirror=' + u for u in params['mirror']])
if params['sticky'] is not None and params['sticky']:
args.append('--sticky')
elif params['sticky'] is not None:
args.append('--non-sticky')
if params['enabled'] is not None and params['enabled']:
args.append('--enable')
elif params['enabled'] is not None:
args.append('--disable')
rc, out, err = module.run_command(
["pkg", "set-publisher"] + args + [name],
check_rc=True
)
response = {
'rc': rc,
'results': [out],
'msg': err,
'changed': True,
}
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def unset_publisher(module, publisher):
if publisher not in get_publishers(module):
module.exit_json()
rc, out, err = module.run_command(
["pkg", "unset-publisher", publisher],
check_rc=True
)
response = {
'rc': rc,
'results': [out],
'msg': err,
'changed': True,
}
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def get_publishers(module):
rc, out, err = module.run_command(["pkg", "publisher", "-Ftsv"], True)
lines = out.splitlines()
keys = lines.pop(0).lower().split("\t")
publishers = {}
for line in lines:
values = dict(zip(keys, map(unstringify, line.split("\t"))))
name = values['publisher']
if name not in publishers:
publishers[name] = dict(
(k, values[k]) for k in ['sticky', 'enabled']
)
publishers[name]['origin'] = []
publishers[name]['mirror'] = []
if values['type'] is not None:
publishers[name][values['type']].append(values['uri'])
return publishers
def unstringify(val):
if val == "-" or val == '':
return None
elif val == "true":
return True
elif val == "false":
return False
else:
return val
if __name__ == '__main__':
main()
|
bixbydev/Bixby
|
refs/heads/master
|
google/dist/gdata-2.0.18/samples/oauth/oauth_on_appengine/appengine_utilities/cron.py
|
129
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import cgi
import re
import datetime
import pickle
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
APPLICATION_PORT = '8080'
CRON_PORT = '8081'
class _AppEngineUtilities_Cron(db.Model):
"""
Model for the tasks in the datastore. This contains the scheduling and
url information, as well as a field that sets the next time the instance
should run.
"""
cron_entry = db.StringProperty()
next_run = db.DateTimeProperty()
cron_compiled = db.BlobProperty()
url = db.LinkProperty()
class Cron(object):
"""
    Cron is a scheduling utility built for appengine, modeled after
    crontab for unix systems. While true scheduled tasks are not
    possible within the Appengine environment currently, this
    is an attempt to provide a request-based alternative. You
    configure the tasks in an included interface, and then import
    the class on any request you want to be capable of running tasks.
    On each request where Cron is imported, the list of tasks
    that need to be run will be pulled and run. A task is a url
    within your application. It's important to make sure that these
    requests run quickly, or you could risk timing out the actual
    request.
See the documentation for more information on configuring
your application to support Cron and setting up tasks.
"""
def __init__(self):
# Check if any tasks need to be run
query = _AppEngineUtilities_Cron.all()
query.filter('next_run <= ', datetime.datetime.now())
results = query.fetch(1000)
if len(results) > 0:
one_second = datetime.timedelta(seconds = 1)
before = datetime.datetime.now()
for r in results:
if re.search(':' + APPLICATION_PORT, r.url):
r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url)
#result = urlfetch.fetch(r.url)
diff = datetime.datetime.now() - before
if int(diff.seconds) < 1:
if memcache.add(str(r.key), "running"):
result = urlfetch.fetch(r.url)
r.next_run = self._get_next_run(pickle.loads(r.cron_compiled))
r.put()
memcache.delete(str(r.key))
else:
break
def add_cron(self, cron_string):
cron = cron_string.split(" ")
if len(cron) is not 6:
raise ValueError, 'Invalid cron string. Format: * * * * * url'
cron = {
'min': cron[0],
'hour': cron[1],
'day': cron[2],
'mon': cron[3],
'dow': cron[4],
'url': cron[5],
}
cron_compiled = self._validate_cron(cron)
next_run = self._get_next_run(cron_compiled)
cron_entry = _AppEngineUtilities_Cron()
cron_entry.cron_entry = cron_string
cron_entry.next_run = next_run
cron_entry.cron_compiled = pickle.dumps(cron_compiled)
cron_entry.url = cron["url"]
cron_entry.put()
def _validate_cron(self, cron):
"""
        Parse each field to determine whether it is an integer or a list,
also converting strings to integers where necessary. If passed bad
values, raises a ValueError.
"""
parsers = {
'dow': self._validate_dow,
'mon': self._validate_mon,
'day': self._validate_day,
'hour': self._validate_hour,
'min': self._validate_min,
'url': self. _validate_url,
}
for el in cron:
parse = parsers[el]
cron[el] = parse(cron[el])
return cron
def _validate_type(self, v, t):
"""
        Validates that the number (v) passed is in the correct range for the
        type (t). Raises ValueError if validation fails.
Valid ranges:
day of week = 0-7
month = 1-12
day = 1-31
hour = 0-23
minute = 0-59
        All can be *, which will then return the full range for that type.
"""
if t == "dow":
if v >= 0 and v <= 7:
return [v]
elif v == "*":
return "*"
else:
raise ValueError, "Invalid day of week."
elif t == "mon":
if v >= 1 and v <= 12:
return [v]
elif v == "*":
                return range(1, 13)  # months 1-12; range end is exclusive
else:
raise ValueError, "Invalid month."
elif t == "day":
if v >= 1 and v <= 31:
return [v]
elif v == "*":
                return range(1, 32)  # days 1-31; range end is exclusive
else:
raise ValueError, "Invalid day."
elif t == "hour":
if v >= 0 and v <= 23:
return [v]
elif v == "*":
                return range(0, 24)  # hours 0-23; range end is exclusive
else:
raise ValueError, "Invalid hour."
elif t == "min":
if v >= 0 and v <= 59:
return [v]
elif v == "*":
                return range(0, 60)  # minutes 0-59; range end is exclusive
else:
raise ValueError, "Invalid minute."
def _validate_list(self, l, t):
"""
        Validates a crontab list. Lists are numerical values separated
by a comma with no spaces. Ex: 0,5,10,15
Arguments:
        l: comma-separated list of numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = l.split(",")
return_list = []
# we have a list, validate all of them
for e in elements:
if "-" in e:
return_list.extend(self._validate_range(e, t))
else:
try:
v = int(e)
self._validate_type(v, t)
return_list.append(v)
except:
raise ValueError, "Names are not allowed in lists."
# return a list of integers
return return_list
def _validate_range(self, r, t):
"""
        Validates a crontab range. Ranges are 2 numerical values separated
by a dash with no spaces. Ex: 0-10
Arguments:
        r: dash-separated list of 2 numbers
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = r.split('-')
# a range should be 2 elements
if len(elements) is not 2:
raise ValueError, "Invalid range passed: " + str(r)
# validate the minimum and maximum are valid for the type
for e in elements:
self._validate_type(int(e), t)
# return a list of the numbers in the range.
# +1 makes sure the end point is included in the return value
return range(int(elements[0]), int(elements[1]) + 1)
def _validate_step(self, s, t):
"""
Validates a crontab step. Steps are complicated. They can
be based on a range 1-10/2 or just step through all valid
*/2. When parsing times you should always check for step first
and see if it has a range or not, before checking for ranges because
this will handle steps of ranges returning the final list. Steps
of lists is not supported.
Arguments:
        s: slash-separated string
t: type used for validation, valid values are
dow, mon, day, hour, min
"""
elements = s.split('/')
# a range should be 2 elements
if len(elements) is not 2:
raise ValueError, "Invalid step passed: " + str(s)
try:
step = int(elements[1])
except:
raise ValueError, "Invalid step provided " + str(s)
r_list = []
# if the first element is *, use all valid numbers
if elements[0] is "*" or elements[0] is "":
r_list.extend(self._validate_type('*', t))
# check and see if there is a list of ranges
elif "," in elements[0]:
ranges = elements[0].split(",")
for r in ranges:
# if it's a range, we need to manage that
if "-" in r:
r_list.extend(self._validate_range(r, t))
else:
try:
r_list.extend(int(r))
except:
raise ValueError, "Invalid step provided " + str(s)
elif "-" in elements[0]:
r_list.extend(self._validate_range(elements[0], t))
return range(r_list[0], r_list[-1] + 1, step)
def _validate_dow(self, dow):
"""
"""
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
if dow is "*":
return dow
days = {
'mon': 1,
'tue': 2,
'wed': 3,
'thu': 4,
'fri': 5,
'sat': 6,
# per man crontab sunday can be 0 or 7.
'sun': [0, 7],
}
if dow in days:
dow = days[dow]
return [dow]
# if dow is * return it. This is for date parsing where * does not mean
# every day for crontab entries.
elif dow is "*":
return dow
elif "/" in dow:
return(self._validate_step(dow, "dow"))
elif "," in dow:
return(self._validate_list(dow, "dow"))
elif "-" in dow:
return(self._validate_range(dow, "dow"))
else:
valid_numbers = range(0, 8)
if not int(dow) in valid_numbers:
raise ValueError, "Invalid day of week " + str(dow)
else:
return [int(dow)]
def _validate_mon(self, mon):
months = {
'jan': 1,
'feb': 2,
'mar': 3,
'apr': 4,
'may': 5,
'jun': 6,
'jul': 7,
'aug': 8,
'sep': 9,
'oct': 10,
'nov': 11,
'dec': 12,
}
if mon in months:
mon = months[mon]
return [mon]
elif mon is "*":
return range(1, 13)
elif "/" in mon:
return(self._validate_step(mon, "mon"))
elif "," in mon:
return(self._validate_list(mon, "mon"))
elif "-" in mon:
return(self._validate_range(mon, "mon"))
else:
valid_numbers = range(1, 13)
if not int(mon) in valid_numbers:
raise ValueError, "Invalid month " + str(mon)
else:
return [int(mon)]
def _validate_day(self, day):
if day is "*":
return range(1, 32)
elif "/" in day:
return(self._validate_step(day, "day"))
elif "," in day:
return(self._validate_list(day, "day"))
elif "-" in day:
return(self._validate_range(day, "day"))
else:
            valid_numbers = range(1, 32)  # allow day 31
if not int(day) in valid_numbers:
raise ValueError, "Invalid day " + str(day)
else:
return [int(day)]
def _validate_hour(self, hour):
if hour is "*":
return range(0, 24)
elif "/" in hour:
return(self._validate_step(hour, "hour"))
elif "," in hour:
return(self._validate_list(hour, "hour"))
elif "-" in hour:
return(self._validate_range(hour, "hour"))
else:
            valid_numbers = range(0, 24)  # allow hour 23
if not int(hour) in valid_numbers:
raise ValueError, "Invalid hour " + str(hour)
else:
return [int(hour)]
def _validate_min(self, min):
if min is "*":
return range(0, 60)
elif "/" in min:
return(self._validate_step(min, "min"))
elif "," in min:
return(self._validate_list(min, "min"))
elif "-" in min:
return(self._validate_range(min, "min"))
else:
            valid_numbers = range(0, 60)  # allow minute 59
if not int(min) in valid_numbers:
raise ValueError, "Invalid min " + str(min)
else:
return [int(min)]
def _validate_url(self, url):
# kludge for issue 842, right now we use request headers
# to set the host.
if url[0] is not "/":
url = "/" + url
url = 'http://' + str(os.environ['HTTP_HOST']) + url
return url
# content below is for when that issue gets fixed
#regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE)
#if regex.match(url) is not None:
# return url
#else:
# raise ValueError, "Invalid url " + url
def _calc_month(self, next_run, cron):
while True:
if cron["mon"][-1] < next_run.month:
next_run = next_run.replace(year=next_run.year+1, \
month=cron["mon"][0], \
day=1,hour=0,minute=0)
else:
if next_run.month in cron["mon"]:
return next_run
else:
one_month = datetime.timedelta(months=1)
next_run = next_run + one_month
def _calc_day(self, next_run, cron):
# start with dow as per cron if dow and day are set
# then dow is used if it comes before day. If dow
# is *, then ignore it.
if str(cron["dow"]) != str("*"):
# convert any integers to lists in order to easily compare values
m = next_run.month
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
else:
m = next_run.month
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
# if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day:
if next_run.day in cron["day"]:
return next_run
else:
one_day = datetime.timedelta(days=1)
next_run = next_run + one_day
def _calc_hour(self, next_run, cron):
m = next_run.month
d = next_run.day
while True:
if next_run.month is not m:
next_run = next_run.replace(hour=0, minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day is not d:
next_run = next_run.replace(hour=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour in cron["hour"]:
return next_run
else:
m = next_run.month
d = next_run.day
one_hour = datetime.timedelta(hours=1)
next_run = next_run + one_hour
def _calc_minute(self, next_run, cron):
one_minute = datetime.timedelta(minutes=1)
m = next_run.month
d = next_run.day
h = next_run.hour
while True:
if next_run.month is not m:
next_run = next_run.replace(minute=0)
next_run = self._calc_month(next_run, cron)
if next_run.day is not d:
next_run = next_run.replace(minute=0)
next_run = self._calc_day(next_run, cron)
if next_run.hour is not h:
next_run = next_run.replace(minute=0)
next_run = self._calc_day(next_run, cron)
if next_run.minute in cron["min"]:
return next_run
else:
m = next_run.month
d = next_run.day
h = next_run.hour
next_run = next_run + one_minute
def _get_next_run(self, cron):
one_minute = datetime.timedelta(minutes=1)
# go up 1 minute because it shouldn't happen right when added
now = datetime.datetime.now() + one_minute
next_run = now.replace(second=0, microsecond=0)
# start with month, which will also help calculate year
next_run = self._calc_month(next_run, cron)
next_run = self._calc_day(next_run, cron)
next_run = self._calc_hour(next_run, cron)
next_run = self._calc_minute(next_run, cron)
return next_run
|
FATruden/boto
|
refs/heads/master
|
boto/dynamodb2/exceptions.py
|
20
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class ProvisionedThroughputExceededException(JSONResponseError):
pass
class LimitExceededException(JSONResponseError):
pass
class ConditionalCheckFailedException(JSONResponseError):
pass
class ResourceInUseException(JSONResponseError):
pass
class ResourceNotFoundException(JSONResponseError):
pass
class InternalServerError(JSONResponseError):
pass
class ValidationException(JSONResponseError):
pass
class ItemCollectionSizeLimitExceededException(JSONResponseError):
pass
class DynamoDBError(Exception):
pass
class UnknownSchemaFieldError(DynamoDBError):
pass
class UnknownIndexFieldError(DynamoDBError):
pass
class UnknownFilterTypeError(DynamoDBError):
pass
class QueryError(DynamoDBError):
pass
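# A minimal, hedged usage sketch (`item` is a hypothetical boto.dynamodb2 Item
# saved with a conditional write; it is not defined in this module):
#
#   from boto.dynamodb2.exceptions import ConditionalCheckFailedException
#   try:
#       item.save()
#   except ConditionalCheckFailedException:
#       pass  # another writer changed the item first; reload and retry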
|
ds-hwang/chromium-crosswalk
|
refs/heads/master
|
chrome/browser/web_dev_style/css_checker.py
|
3
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium WebUI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
# TODO(dbeam): Real CSS parser? https://github.com/danbeam/css-py/tree/css3
class CSSChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def RunChecks(self):
    # We use this a lot, so make a nickname variable.
re = self.input_api.re
def _collapseable_hex(s):
return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])
def _is_gray(s):
return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]
def _remove_all(s):
s = _remove_grit(s)
s = _remove_ats(s)
s = _remove_comments(s)
s = _remove_template_expressions(s)
s = _remove_mixins(s)
return s
def _extract_inline_style(s):
return '\n'.join(re.findall(r'<style>([^<]*)<\/style>', s))
def _remove_ats(s):
at_reg = re.compile(r"""
@(?!\d+x\b)\w+[^'"]*?{ # @at-keyword selector junk {, not @2x
(.*{.*?})+ # inner { curly } blocks, rules, and selector
.*?} # stuff up to the first end curly }
""",
re.DOTALL | re.VERBOSE)
return at_reg.sub('\\1', s)
def _remove_comments(s):
return re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', s)
def _remove_mixins(s):
return re.sub(re.compile(r'--[\d\w-]+: {.*?};', re.DOTALL), '', s)
def _remove_template_expressions(s):
return re.sub(re.compile(r'\$i18n{[^}]*}', re.DOTALL), '', s)
def _remove_grit(s):
grit_reg = re.compile(r"""
<if[^>]+>.*?<\s*/\s*if[^>]*>| # <if> contents </if>
<include[^>]+> # <include>
""",
re.DOTALL | re.VERBOSE)
return re.sub(grit_reg, '', s)
def _rgb_from_hex(s):
if len(s) == 3:
r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
else:
r, g, b = s[0:2], s[2:4], s[4:6]
return int(r, base=16), int(g, base=16), int(b, base=16)
def _strip_prefix(s):
return re.sub(r'^-(?:o|ms|moz|khtml|webkit)-', '', s)
def alphabetize_props(contents):
errors = []
# TODO(dbeam): make this smart enough to detect issues in mixins.
for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
semis = map(lambda t: t.strip(), rule.group(1).split(';'))[:-1]
rules = filter(lambda r: ': ' in r, semis)
props = map(lambda r: r[0:r.find(':')], rules)
if props != sorted(props):
errors.append(' %s;\n' % (';\n '.join(rules)))
return errors
def braces_have_space_before_and_nothing_after(line):
brace_space_reg = re.compile(r"""
(?:^|\S){| # selector{ or selector\n{ or
{\s*\S+\s* # selector { with stuff after it
$ # must be at the end of a line
""",
re.VERBOSE)
return brace_space_reg.search(line)
def classes_use_dashes(line):
# Intentionally dumbed down version of CSS 2.1 grammar for class without
# non-ASCII, escape chars, or whitespace.
class_reg = re.compile(r"""
\.(-?[\w-]+).* # ., then maybe -, then alpha numeric and -
[,{]\s*$ # selectors should end with a , or {
""",
re.VERBOSE)
m = class_reg.search(line)
if not m:
return False
class_name = m.group(1)
return class_name.lower() != class_name or '_' in class_name
end_mixin_reg = re.compile(r'\s*};\s*$')
def close_brace_on_new_line(line):
# Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
frame_reg = re.compile(r"""
\s*(from|to|\d+%)\s*{ # 50% {
\s*[\w-]+: # rule:
(\s*[\w\(\), -]+)+\s*; # value;
\s*}\s* # }
""",
re.VERBOSE)
return ('}' in line and re.search(r'[^ }]', line) and
not frame_reg.match(line) and not end_mixin_reg.match(line))
def colons_have_space_after(line):
colon_space_reg = re.compile(r"""
(?<!data) # ignore data URIs
:(?!//) # ignore url(http://), etc.
\S[^;]+;\s* # only catch one-line rules for now
""",
re.VERBOSE)
return colon_space_reg.search(line)
def favor_single_quotes(line):
return '"' in line
# Shared between hex_could_be_shorter and rgb_if_not_gray.
hex_reg = re.compile(r"""
\#([a-fA-F0-9]{3}|[a-fA-F0-9]{6}) # pound followed by 3 or 6 hex digits
(?=[^\w-]|$) # no more alphanum chars or at EOL
(?!.*(?:{.*|,\s*)$) # not in a selector
""",
re.VERBOSE)
def hex_could_be_shorter(line):
m = hex_reg.search(line)
return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))
def rgb_if_not_gray(line):
m = hex_reg.search(line)
return (m and not _is_gray(m.group(1)))
small_seconds_reg = re.compile(r"""
(?:^|[^\w-]) # start of a line or a non-alphanumeric char
(0?\.[0-9]+)s # 1.0s
(?!-?[\w-]) # no following - or alphanumeric chars
""",
re.VERBOSE)
def milliseconds_for_small_times(line):
return small_seconds_reg.search(line)
def suggest_ms_from_s(line):
ms = int(float(small_seconds_reg.search(line).group(1)) * 1000)
return ' (replace with %dms)' % ms
def no_data_uris_in_source_files(line):
return re.search(r'\(\s*\s*data:', line)
def no_quotes_in_url(line):
return re.search('url\s*\(\s*["\']', line, re.IGNORECASE)
def one_rule_per_line(line):
one_rule_reg = re.compile(r"""
[\w-](?<!data): # a rule: but no data URIs
(?!//)[^;]+; # value; ignoring colons in protocols:// and };
\s*[^ }]\s* # any non-space after the end colon
""",
re.VERBOSE)
return one_rule_reg.search(line) and not end_mixin_reg.match(line)
def pseudo_elements_double_colon(contents):
pseudo_elements = ['after',
'before',
'calendar-picker-indicator',
'color-swatch',
'color-swatch-wrapper',
'date-and-time-container',
'date-and-time-value',
'datetime-edit',
'datetime-edit-ampm-field',
'datetime-edit-day-field',
'datetime-edit-hour-field',
'datetime-edit-millisecond-field',
'datetime-edit-minute-field',
'datetime-edit-month-field',
'datetime-edit-second-field',
'datetime-edit-text',
'datetime-edit-week-field',
'datetime-edit-year-field',
'details-marker',
'file-upload-button',
'first-letter',
'first-line',
'inner-spin-button',
'input-placeholder',
'input-speech-button',
'keygen-select',
'media-slider-container',
'media-slider-thumb',
'meter-bar',
'meter-even-less-good-value',
'meter-inner-element',
'meter-optimum-value',
'meter-suboptimum-value',
'progress-bar',
'progress-inner-element',
'progress-value',
'resizer',
'scrollbar',
'scrollbar-button',
'scrollbar-corner',
'scrollbar-thumb',
'scrollbar-track',
'scrollbar-track-piece',
'search-cancel-button',
'search-decoration',
'search-results-button',
'search-results-decoration',
'selection',
'slider-container',
'slider-runnable-track',
'slider-thumb',
'textfield-decoration-container',
'validation-bubble',
'validation-bubble-arrow',
'validation-bubble-arrow-clipper',
'validation-bubble-heading',
'validation-bubble-message',
'validation-bubble-text-block']
pseudo_reg = re.compile(r"""
(?<!:): # a single colon, i.e. :after but not ::after
([a-zA-Z-]+) # a pseudo element, class, or function
(?=[^{}]+?{) # make sure a selector, not inside { rules }
""",
re.MULTILINE | re.VERBOSE)
errors = []
for p in re.finditer(pseudo_reg, contents):
pseudo = p.group(1).strip().splitlines()[0]
if _strip_prefix(pseudo.lower()) in pseudo_elements:
errors.append(' :%s (should be ::%s)' % (pseudo, pseudo))
return errors
def one_selector_per_line(contents):
any_reg = re.compile(r"""
:(?:-webkit-)?any\(.*?\) # :-webkit-any(a, b, i) selector
""",
re.DOTALL | re.VERBOSE)
multi_sels_reg = re.compile(r"""
(?:}\s*)? # ignore 0% { blah: blah; }, from @keyframes
([^,]+,(?=[^{}]+?{) # selector junk {, not in a { rule }
.*[,{])\s*$ # has to end with , or {
""",
re.MULTILINE | re.VERBOSE)
errors = []
for b in re.finditer(multi_sels_reg, re.sub(any_reg, '', contents)):
errors.append(' ' + b.group(1).strip().splitlines()[-1:][0])
return errors
def suggest_rgb_from_hex(line):
suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
for h in re.finditer(hex_reg, line)]
return ' (replace with %s)' % ', '.join(suggestions)
def suggest_short_hex(line):
h = hex_reg.search(line).group(1)
return ' (replace with #%s)' % (h[0] + h[2] + h[4])
webkit_before_or_after_reg = re.compile(r'-webkit-(\w+-)(after|before):')
def suggest_top_or_bottom(line):
prop, pos = webkit_before_or_after_reg.search(line).groups()
top_or_bottom = 'top' if pos == 'before' else 'bottom'
return ' (replace with %s)' % (prop + top_or_bottom)
def webkit_before_or_after(line):
return webkit_before_or_after_reg.search(line)
def zero_width_lengths(contents):
hsl_reg = re.compile(r"""
hsl\([^\)]* # hsl(maybestuff
(?:[, ]|(?<=\()) # a comma or space not followed by a (
(?:0?\.?)?0% # some equivalent to 0%
""",
re.VERBOSE)
zeros_reg = re.compile(r"""
^.*(?:^|[^0-9.]) # start/non-number
(?:\.0|0(?:\.0? # .0, 0, or 0.0
|px|em|%|in|cm|mm|pc|pt|ex)) # a length unit
(?:\D|$) # non-number/end
(?=[^{}]+?}).*$ # only { rules }
""",
re.MULTILINE | re.VERBOSE)
errors = []
for z in re.finditer(zeros_reg, contents):
first_line = z.group(0).strip().splitlines()[0]
if not hsl_reg.search(first_line):
errors.append(' ' + first_line)
return errors
# NOTE: Currently multi-line checks don't support 'after'. Instead, add
# suggestions while parsing the file so another pass isn't necessary.
added_or_modified_files_checks = [
{ 'desc': 'Alphabetize properties and list vendor specific (i.e. '
'-webkit) above standard.',
'test': alphabetize_props,
'multiline': True,
},
{ 'desc': 'Start braces ({) end a selector, have a space before them '
'and no rules after.',
'test': braces_have_space_before_and_nothing_after,
},
{ 'desc': 'Classes use .dash-form.',
'test': classes_use_dashes,
},
{ 'desc': 'Always put a rule closing brace (}) on a new line.',
'test': close_brace_on_new_line,
},
{ 'desc': 'Colons (:) should have a space after them.',
'test': colons_have_space_after,
},
{ 'desc': 'Use single quotes (\') instead of double quotes (") in '
'strings.',
'test': favor_single_quotes,
},
{ 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
'test': hex_could_be_shorter,
'after': suggest_short_hex,
},
{ 'desc': 'Use milliseconds for time measurements under 1 second.',
'test': milliseconds_for_small_times,
'after': suggest_ms_from_s,
},
{ 'desc': "Don't use data URIs in source files. Use grit instead.",
'test': no_data_uris_in_source_files,
},
{ 'desc': "Don't use quotes in url().",
'test': no_quotes_in_url,
},
{ 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
'test': one_rule_per_line,
},
{ 'desc': 'One selector per line (what not to do: a, b {}).',
'test': one_selector_per_line,
'multiline': True,
},
{ 'desc': 'Pseudo-elements should use double colon (i.e. ::after).',
'test': pseudo_elements_double_colon,
'multiline': True,
},
{ 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
'test': rgb_if_not_gray,
'after': suggest_rgb_from_hex,
},
{ 'desc': 'Use *-top/bottom instead of -webkit-*-before/after.',
'test': webkit_before_or_after,
'after': suggest_top_or_bottom,
},
{ 'desc': 'Use "0" for zero-width lengths (i.e. 0px -> 0)',
'test': zero_width_lengths,
'multiline': True,
},
]
results = []
affected_files = self.input_api.AffectedFiles(include_deletes=False,
file_filter=self.file_filter)
files = []
for f in affected_files:
file_contents = '\n'.join(f.NewContents())
path = f.LocalPath()
# Handle CSS files and HTML files with inline styles.
if path.endswith('.html'):
file_contents = _extract_inline_style(file_contents)
if path.endswith('.html') or path.endswith('.css'):
# Remove all /*comments*/, @at-keywords, and grit <if|include> tags;
# we're not using a real parser. TODO(dbeam): Check alpha in <if>
# blocks.
file_contents = _remove_all(file_contents)
files.append((path, file_contents))
for f in files:
file_errors = []
for check in added_or_modified_files_checks:
        # If the check is multiline, it receives the whole file and gives us
# back a list of things wrong. If the check isn't multiline, we pass it
# each line and the check returns something truthy if there's an issue.
if ('multiline' in check and check['multiline']):
assert not 'after' in check
check_errors = check['test'](f[1])
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors).rstrip()))
else:
check_errors = []
lines = f[1].splitlines()
for lnum, line in enumerate(lines):
if check['test'](line):
error = ' ' + line.strip()
if 'after' in check:
error += check['after'](line)
check_errors.append(error)
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors)))
if file_errors:
results.append(self.output_api.PresubmitPromptWarning(
'%s:\n%s' % (f[0], '\n\n'.join(file_errors))))
return results
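# A hedged sketch of how a PRESUBMIT.py script might wire this checker in
# (the hook name follows depot_tools conventions; the exact call site is an
# assumption, not part of this file):
#
#   def CheckChangeOnUpload(input_api, output_api):
#       return CSSChecker(input_api, output_api).RunChecks()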
|
twobob/buildroot-kindle
|
refs/heads/master
|
output/build/host-python-2.7.2/Lib/test/test_shutil.py
|
14
|
# Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats)
import tarfile
import warnings
from test import test_support
from test.test_support import TESTFN, check_warnings, captured_stdout
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zlib
except ImportError:
zlib = None
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def write_file(self, path, content='xxx'):
"""Writes a file in the given path.
path can be a string or a sequence.
"""
if isinstance(path, (list, tuple)):
path = os.path.join(*path)
f = open(path, 'w')
try:
f.write(content)
finally:
f.close()
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(OSError, shutil.rmtree, filename)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.childpath = os.path.join(TESTFN, 'a')
f = open(self.childpath, 'w')
f.close()
old_dir_mode = os.stat(TESTFN).st_mode
old_child_mode = os.stat(self.childpath).st_mode
# Make unwritable.
os.chmod(self.childpath, stat.S_IREAD)
os.chmod(TESTFN, stat.S_IREAD)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 2,
"Expected call to onerror function did not happen.")
# Make writable again.
os.chmod(TESTFN, old_dir_mode)
os.chmod(self.childpath, old_child_mode)
# Clean up.
shutil.rmtree(TESTFN)
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 400, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState == 0:
if func is os.remove:
self.assertEqual(arg, self.childpath)
else:
self.assertIs(func, os.listdir,
"func must be either os.remove or os.listdir")
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 2
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.fdopen(handle).close()
self.assertRaises(OSError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
write_data(os.path.join(src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_data(os.path.join(src_dir, 'test_dir', 'test.txt'), '456')
try:
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_data(os.path.join(dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_data(os.path.join(dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
finally:
for path in (
os.path.join(src_dir, 'test.txt'),
os.path.join(dst_dir, 'test.txt'),
os.path.join(src_dir, 'test_dir', 'test.txt'),
os.path.join(dst_dir, 'test_dir', 'test.txt'),
):
if os.path.exists(path):
os.remove(path)
for path in (src_dir,
os.path.dirname(dst_dir)
):
if os.path.exists(path):
shutil.rmtree(path)
def test_copytree_with_exclude(self):
def write_data(path, data):
f = open(path, "w")
f.write(data)
f.close()
def read_data(path):
f = open(path)
data = f.read()
f.close()
return data
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_data(join(src_dir, 'test.txt'), '123')
write_data(join(src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_data(join(src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_data(join(src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_data(join(src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_data(join(src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test.tmp')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
path.split()[-1] == 'subdir'):
res.append(name)
elif os.path.splitext(path)[-1] in ('.py'):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertTrue(not exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
if hasattr(os, "symlink"):
def test_dont_copy_file_onto_link_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
f = open(src, 'w')
f.write('cheddar')
f.close()
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
try:
shutil.rmtree(TESTFN)
except OSError:
pass
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
@unittest.skipUnless(zlib, "requires zlib")
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
self.write_file([tmpdir, 'sub', 'file3'], 'xxx')
tmpdir2 = self.mkdtemp()
unittest.skipUnless(splitdrive(tmpdir)[0] == splitdrive(tmpdir2)[0],
"source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
self.write_file([dist, 'file1'], 'xxx')
self.write_file([dist, 'file2'], 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
self.write_file([dist, 'sub', 'file3'], 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
self.write_file([tmpdir, 'file1'], 'xxx')
self.write_file([tmpdir, 'file2'], 'xxx')
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@unittest.skipUnless(zlib, "Requires zlib")
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
base_name = os.path.join(self.mkdtemp() , 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@unittest.skipUnless(zlib, "Requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
# Try to create a dir in the current directory, hoping that it is
# not located on the same filesystem as the system tmp dir.
try:
self.dir_other_fs = tempfile.mkdtemp(
dir=os.path.dirname(__file__))
self.file_other_fs = os.path.join(self.dir_other_fs,
filename)
except OSError:
self.dir_other_fs = None
with open(self.src_file, "wb") as f:
f.write("spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir, self.dir_other_fs):
try:
if d:
shutil.rmtree(d)
except:
pass
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.file_other_fs,
self.file_other_fs)
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_file(self.src_file, self.dir_other_fs,
self.file_other_fs)
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
if not self.dir_other_fs:
# skip
return
dst_dir = tempfile.mktemp(dir=self.dir_other_fs)
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
if not self.dir_other_fs:
# skip
return
self._check_move_dir(self.src_dir, self.dir_other_fs,
os.path.join(self.dir_other_fs, os.path.basename(self.src_dir)))
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
class TestCopyFile(unittest.TestCase):
_delete = False
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
if os.path.exists(dst_dir):
os.rmdir(dst_dir)
def test_main():
test_support.run_unittest(TestShutil, TestMove, TestCopyFile)
if __name__ == '__main__':
test_main()
|
thomdixon/elasticsearch-py
|
refs/heads/master
|
elasticsearch/connection/esthrift/__init__.py
|
36
|
__all__ = ['ttypes', 'constants', 'Rest']
|
YinongLong/scikit-learn
|
refs/heads/master
|
sklearn/ensemble/weight_boosting.py
|
12
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseWeightBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_regressor
from ..externals import six
from ..externals.six.moves import zip
from ..externals.six.moves import xrange as range
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
from sklearn.utils.validation import has_fit_parameter, check_is_fitted
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if (self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree,
BaseForest))):
dtype = DTYPE
accept_sparse = 'csc'
else:
dtype = None
accept_sparse = ['csr', 'csc']
X, y = check_X_y(X, y, accept_sparse=accept_sparse, dtype=dtype,
y_numeric=is_regressor(self))
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float64)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)
for iboost in range(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
        z : generator of float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _validate_X_predict(self, X):
"""Ensure that X is in the proper format"""
if (self.base_estimator is None or
isinstance(self.base_estimator,
(BaseDecisionTree, BaseForest))):
X = check_array(X, accept_sparse='csr', dtype=DTYPE)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return X
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported" % self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError("%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__)
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba = y_predict_proba # alias for readability
proba[proba < np.finfo(proba.dtype).eps] = np.finfo(proba.dtype).eps
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
if len(self.estimators_) == 0:
raise ValueError('BaseClassifier in AdaBoostClassifier '
'ensemble is worse than random, ensemble '
'can not be fit.')
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
        # Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
check_is_fitted(self, "n_classes_")
X = self._validate_X_predict(X)
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
check_is_fitted(self, "n_classes_")
n_classes = self.n_classes_
X = self._validate_X_predict(X)
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
X = self._validate_X_predict(X)
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators.
estimator_weights_ : array of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : array of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
check_is_fitted(self, "estimator_weights_")
X = self._validate_X_predict(X)
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
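# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): a minimal end-to-end usage example of the two estimators defined
# above. The toy data and parameter values are arbitrary assumptions chosen
# only for demonstration; `np` is the numpy import from the top of the module.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    # Toy binary classification problem: two well-separated Gaussian blobs.
    X_clf = np.vstack([rng.randn(50, 2) + 2., rng.randn(50, 2) - 2.])
    y_clf = np.array([0] * 50 + [1] * 50)
    clf = AdaBoostClassifier(n_estimators=25, learning_rate=1., random_state=0)
    clf.fit(X_clf, y_clf)
    print("AdaBoostClassifier training accuracy:", clf.score(X_clf, y_clf))
    # Toy regression problem: a noisy sine curve, fitted with AdaBoost.R2.
    X_reg = np.linspace(0., 6., 100)[:, np.newaxis]
    y_reg = np.sin(X_reg).ravel() + 0.1 * rng.randn(100)
    reg = AdaBoostRegressor(n_estimators=25, random_state=0)
    reg.fit(X_reg, y_reg)
    print("AdaBoostRegressor training R^2:", reg.score(X_reg, y_reg))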
|
aabbox/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py
|
355
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
self.isstring = isinstance(obj, str) or isinstance(obj, bytes)
# Support for bytes here is Py2
if self.isstring:
self.obj = ensure_str(self.obj)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
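# --- Editor's illustrative sketch (not part of the original html5lib module):
# a minimal walk over a small lxml document, printing the token stream that
# the TreeWalker defined above produces. The sample markup is an arbitrary
# assumption for demonstration purposes.
if __name__ == "__main__":
    _doc = etree.fromstring("<div>hello <b>world</b>!</div>").getroottree()
    for _token in TreeWalker(_doc):
        # Each token is a dict with a "type" key and, depending on the type,
        # a "name" (tags) or "data" (text) entry.
        print(_token["type"], _token.get("name", _token.get("data", "")))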
|
ilarischeinin/chipster
|
refs/heads/master
|
ext/applications/apache-activemq-5.10.0/examples/stomp/python/stomppy/listener.py
|
7
|
#!/usr/bin/env python
# ------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import time
import sys
import os
import stomp
user = os.getenv("ACTIVEMQ_USER") or "admin"
password = os.getenv("ACTIVEMQ_PASSWORD") or "password"
host = os.getenv("ACTIVEMQ_HOST") or "localhost"
port = int(os.getenv("ACTIVEMQ_PORT") or 61613)
destination = sys.argv[1:2] or ["/topic/event"]
destination = destination[0]
class MyListener(object):
def __init__(self, conn):
self.conn = conn
self.count = 0
self.start = time.time()
def on_error(self, headers, message):
print('received an error %s' % message)
def on_message(self, headers, message):
if message == "SHUTDOWN":
diff = time.time() - self.start
print("Received %s in %f seconds" % (self.count, diff))
conn.disconnect()
sys.exit(0)
else:
if self.count==0:
self.start = time.time()
self.count += 1
if self.count % 1000 == 0:
print("Received %s messages." % self.count)
conn = stomp.Connection(host_and_ports = [(host, port)])
conn.set_listener('', MyListener(conn))
conn.start()
conn.connect(login=user,passcode=password)
conn.subscribe(destination=destination, ack='auto')
print("Waiting for messages...")
while 1:
time.sleep(10)
|
SanketDG/networkx
|
refs/heads/master
|
networkx/algorithms/approximation/clique.py
|
11
|
# -*- coding: utf-8 -*-
"""
Cliques.
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.algorithms.approximation import ramsey
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
__all__ = ["clique_removal","max_clique"]
def max_clique(G):
r"""Find the Maximum Clique
Finds the `O(|V|/(log|V|)^2)` apx of maximum clique/independent set
in the worst case.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
clique : set
The apx-maximum clique of the graph
Notes
------
A clique in an undirected graph G = (V, E) is a subset of the vertex set
`C \subseteq V`, such that for every two vertices in C, there exists an edge
connecting the two. This is equivalent to saying that the subgraph
induced by C is complete (in some cases, the term clique may also refer
to the subgraph).
A maximum clique is a clique of the largest possible size in a given graph.
The clique number `\omega(G)` of a graph G is the number of
vertices in a maximum clique in G. The intersection number of
G is the smallest number of cliques that together cover all edges of G.
http://en.wikipedia.org/wiki/Maximum_clique
References
----------
.. [1] Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
doi:10.1007/BF01994876
"""
if G is None:
raise ValueError("Expected NetworkX graph!")
# finding the maximum clique in a graph is equivalent to finding
# the independent set in the complementary graph
cgraph = nx.complement(G)
iset, _ = clique_removal(cgraph)
return iset
def clique_removal(G):
""" Repeatedly remove cliques from the graph.
Results in a `O(|V|/(\log |V|)^2)` approximation of maximum clique
& independent set. Returns the largest independent set found, along
with found maximal cliques.
Parameters
----------
G : NetworkX graph
Undirected graph
Returns
-------
max_ind_cliques : (set, list) tuple
Maximal independent set and list of maximal cliques (sets) in the graph.
References
----------
.. [1] Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
"""
graph = G.copy(with_data=False)
c_i, i_i = ramsey.ramsey_R2(graph)
cliques = [c_i]
isets = [i_i]
while graph:
graph.remove_nodes_from(c_i)
c_i, i_i = ramsey.ramsey_R2(graph)
if c_i:
cliques.append(c_i)
if i_i:
isets.append(i_i)
# Determine the largest independent set as measured by cardinality.
maxiset = max(isets, key=len)
return maxiset, cliques
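# --- Editor's illustrative sketch (not part of the original networkx module):
# a minimal usage example of the two approximation routines defined above.
# The example graph is an arbitrary assumption chosen for demonstration.
if __name__ == "__main__":
    G = nx.complete_graph(4)                     # a 4-clique on nodes 0..3
    G.add_edges_from([(3, 4), (4, 5), (5, 6)])   # plus a short tail
    print("approximate maximum clique:", max_clique(G))
    independent_set, cliques = clique_removal(G)
    print("maximal independent set:", independent_set)
    print("maximal cliques found:", cliques)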
|
kylejusticemagnuson/pyti
|
refs/heads/master
|
tests/test_moving_average_envelope.py
|
1
|
from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import moving_average_envelope
class TestMovingAverageEnvelope(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.data = SampleData().get_sample_close_data()
self.center_band_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 804.55166666666673, 807.84333333333336, 809.89666666666665,
811.21833333333325, 811.20333333333338, 812.51166666666666,
813.88000000000011, 814.40333333333331, 813.18666666666661,
812.6783333333334, 810.23333333333346, 806.20333333333338,
799.25166666666667, 793.06499999999994, 785.82499999999993,
778.30499999999995, 775.09000000000003, 774.75166666666667,
776.35333333333347, 776.68333333333339, 779.10666666666668,
782.55166666666673, 784.03833333333341, 781.79333333333341,
781.85500000000002, 781.81833333333327, 781.17833333333328,
775.88166666666666, 773.70666666666659, 774.42666666666662,
777.66499999999996, 782.99833333333333, 787.4766666666668,
792.12333333333345, 793.86333333333334, 795.21833333333336,
795.20000000000016, 794.85333333333335, 797.77499999999998,
803.81666666666672, 810.46833333333336, 817.15666666666664,
822.19999999999993, 824.55999999999983, 824.90499999999986,
826.52833333333331, 826.42666666666662, 822.80833333333339,
817.61833333333345, 814.28833333333341, 812.64499999999998,
809.72499999999991, 808.505, 807.48333333333323, 807.23000000000002,
806.75500000000011, 805.25833333333321, 803.72666666666657,
802.04166666666663, 802.36333333333334, 803.52666666666664,
805.11000000000001, 805.08666666666659, 807.51666666666677,
809.49833333333333, 809.89666666666665, 808.18333333333328,
805.62666666666667, 804.84666666666669, 802.55833333333339,
798.31000000000006, 795.5916666666667, 795.43166666666673,
794.28000000000009, 795.0916666666667, 796.21833333333336,
799.1450000000001, 800.50333333333344, 799.26666666666677, 799.495,
797.67500000000007, 795.64666666666665, 793.17999999999995,
792.25166666666667, 792.61833333333345, 793.74166666666667,
794.58000000000004, 795.21833333333325, 796.80666666666673,
799.15999999999997, 800.42500000000007, 801.98666666666668,
803.67000000000007, 805.09499999999991, 806.05166666666662,
806.39499999999987, 807.06833333333327, 807.23000000000002,
805.59666666666669, 804.04999999999984, 802.65500000000009,
801.56499999999994, 799.25, 792.40166666666664, 786.52166666666665,
779.64333333333332, 772.54333333333341, 765.60000000000002,
759.44500000000005, 757.98500000000001, 756.55833333333328,
755.81666666666661, 752.16833333333341, 748.25500000000011,
744.10000000000002, 740.005, 735.63666666666666, 729.73333333333323,
725.005, 720.53333333333342, 716.43500000000006, 712.72500000000002]
self.upper_band_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 885.00683333333347, 888.62766666666676, 890.88633333333337,
892.34016666666662, 892.32366666666678, 893.76283333333345,
895.26800000000014, 895.84366666666676, 894.5053333333334,
893.94616666666684, 891.25666666666689, 886.82366666666678,
879.17683333333343, 872.37149999999997, 864.40750000000003,
856.13549999999998, 852.59900000000016, 852.22683333333339,
853.98866666666686, 854.3516666666668, 857.01733333333345,
860.80683333333343, 862.44216666666682, 859.97266666666678,
860.04050000000007, 860.0001666666667, 859.29616666666664,
853.46983333333344, 851.07733333333329, 851.86933333333332,
855.43150000000003, 861.2981666666667, 866.22433333333356,
871.33566666666684, 873.24966666666671, 874.74016666666682,
874.72000000000025, 874.33866666666677, 877.55250000000001,
884.19833333333349, 891.5151666666668, 898.87233333333336,
904.41999999999996, 907.01599999999985, 907.39549999999997,
909.18116666666674, 909.06933333333336, 905.08916666666676,
899.38016666666692, 895.7171666666668, 893.90950000000009,
890.69749999999999, 889.35550000000012, 888.23166666666668,
887.95300000000009, 887.43050000000017, 885.78416666666658,
884.09933333333333, 882.24583333333339, 882.59966666666674,
883.87933333333342, 885.62100000000009, 885.59533333333331,
888.26833333333354, 890.44816666666679, 890.88633333333337,
889.00166666666667, 886.18933333333337, 885.33133333333342,
882.81416666666678, 878.14100000000019, 875.15083333333348,
874.97483333333344, 873.7080000000002, 874.60083333333341,
875.84016666666673, 879.05950000000018, 880.55366666666691,
879.1933333333335, 879.44450000000006, 877.44250000000011,
875.21133333333341, 872.49800000000005, 871.47683333333339,
871.88016666666692, 873.1158333333334, 874.03800000000012,
874.7401666666666, 876.48733333333348, 879.07600000000002,
880.4675000000002, 882.18533333333346, 884.03700000000015,
885.60450000000003, 886.65683333333334, 887.03449999999998,
887.77516666666668, 887.95300000000009, 886.15633333333346,
884.45499999999993, 882.92050000000017, 881.72149999999999,
879.17500000000007, 871.64183333333335, 865.17383333333339,
857.60766666666677, 849.79766666666683, 842.16000000000008,
835.38950000000011, 833.78350000000012, 832.21416666666664,
831.39833333333331, 827.38516666666681, 823.08050000000014,
818.5100000000001, 814.0055000000001, 809.20033333333333,
802.70666666666659, 797.5055000000001, 792.58666666666682,
788.07850000000008, 783.99750000000006]
self.lower_band_period_6_expected = [np.nan, np.nan, np.nan, np.nan,
np.nan, 724.09650000000011, 727.05900000000008, 728.90700000000004,
730.09649999999999, 730.08300000000008, 731.26049999999998,
732.49200000000008, 732.96299999999997, 731.86799999999994,
731.41050000000007, 729.21000000000015, 725.58300000000008,
719.32650000000001, 713.75849999999991, 707.24249999999995,
700.47449999999992, 697.58100000000002, 697.27650000000006,
698.71800000000019, 699.0150000000001, 701.19600000000003,
704.29650000000004, 705.63450000000012, 703.61400000000003,
703.66950000000008, 703.63649999999996, 703.06049999999993,
698.29349999999999, 696.3359999999999, 696.98399999999992,
699.89850000000001, 704.69849999999997, 708.72900000000016,
712.91100000000017, 714.47699999999998, 715.69650000000001,
715.68000000000018, 715.36800000000005, 717.99749999999995,
723.43500000000006, 729.42150000000004, 735.44100000000003,
739.9799999999999, 742.10399999999981, 742.41449999999986,
743.87549999999999, 743.78399999999999, 740.52750000000003,
735.8565000000001, 732.85950000000014, 731.38049999999998,
728.75249999999994, 727.65449999999998, 726.7349999999999,
726.50700000000006, 726.07950000000017, 724.73249999999996,
723.35399999999993, 721.83749999999998, 722.12700000000007,
723.17399999999998, 724.59900000000005, 724.57799999999997,
726.7650000000001, 728.54849999999999, 728.90700000000004,
727.36500000000001, 725.06399999999996, 724.36200000000008,
722.30250000000012, 718.47900000000004, 716.03250000000003,
715.88850000000002, 714.85200000000009, 715.5825000000001,
716.59649999999999, 719.23050000000012, 720.45300000000009,
719.34000000000015, 719.54550000000006, 717.90750000000003,
716.08199999999999, 713.86199999999997, 713.02650000000006,
713.3565000000001, 714.36750000000006, 715.12200000000007,
715.6964999999999, 717.12600000000009, 719.24400000000003,
720.38250000000005, 721.78800000000001, 723.30300000000011,
724.58549999999991, 725.44650000000001, 725.75549999999987,
726.36149999999998, 726.50700000000006, 725.03700000000003,
723.64499999999987, 722.38950000000011, 721.4085, 719.32500000000005,
713.16150000000005, 707.86950000000002, 701.67899999999997,
695.2890000000001, 689.04000000000008, 683.5005000000001,
682.18650000000002, 680.90249999999992, 680.23500000000001,
676.95150000000012, 673.42950000000008, 669.69000000000005,
666.00450000000001, 662.07299999999998, 656.75999999999988,
652.50450000000001, 648.48000000000013, 644.79150000000004,
641.45249999999999]
def test_center_band_period_6(self):
period = 6
cb = moving_average_envelope.center_band(self.data, period)
np.testing.assert_array_equal(cb, self.center_band_period_6_expected)
def test_center_band_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
moving_average_envelope.center_band(self.data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
def test_upper_band_period_6(self):
period = 6
env_percentage = 0.1
ub = moving_average_envelope.upper_band(self.data, period, env_percentage)
np.testing.assert_array_equal(ub, self.upper_band_period_6_expected)
def test_upper_band_invalid_period(self):
period = 128
env_percentage = 0.1
with self.assertRaises(Exception) as cm:
moving_average_envelope.upper_band(self.data, period, env_percentage)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
def test_lower_band_period_6(self):
period = 6
env_percentage = 0.1
lb = moving_average_envelope.lower_band(self.data, period, env_percentage)
np.testing.assert_array_equal(lb, self.lower_band_period_6_expected)
def test_lower_band_invalid_period(self):
period = 128
env_percentage = 0.1
with self.assertRaises(Exception) as cm:
moving_average_envelope.lower_band(self.data, period, env_percentage)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
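# --- Editor's illustrative sketch (not part of the original test module): a
# minimal usage example of the three moving_average_envelope functions that
# the tests above exercise. The short price series and the 10% envelope are
# arbitrary assumptions for demonstration.
if __name__ == "__main__":
    prices = [10.0, 10.5, 11.0, 10.8, 11.2, 11.5, 11.3, 11.8, 12.0, 12.2]
    period = 6
    env_percentage = 0.1
    print("center band:", moving_average_envelope.center_band(prices, period))
    print("upper band:", moving_average_envelope.upper_band(prices, period, env_percentage))
    print("lower band:", moving_average_envelope.lower_band(prices, period, env_percentage))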
|
otherness-space/myProject002
|
refs/heads/master
|
my_project_002/lib/python2.7/site-packages/django/contrib/admin/templatetags/admin_list.py
|
103
|
from __future__ import unicode_literals
import datetime
from django.contrib.admin.util import (lookup_field, display_for_field,
display_for_value, label_for_field)
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_text, force_text
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl,i):
"""
Generates an individual page index link in a paginated list.
"""
if i == DOT:
return '... '
elif i == cl.page_num:
return format_html('<span class="this-page">{0}</span> ', i+1)
else:
return format_html('<a href="{0}"{1}>{2}</a> ',
cl.get_query_string({PAGE_VAR: i}),
mark_safe(' class="end"' if i == cl.paginator.num_pages-1 else ''),
i+1)
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
"""
Generates the series of links to the pages in a paginated list.
"""
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy pagination with ellipses ("smart" links).
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
def result_headers(cl):
"""
Generates the list column headers.
"""
ordering_field_columns = cl.get_ordering_field_columns()
for i, field_name in enumerate(cl.list_display):
text, attr = label_for_field(field_name, cl.model,
model_admin = cl.model_admin,
return_attr = True
)
if attr:
# Potentially not sortable
# if the field is the action checkbox: no sorting and special class
if field_name == 'action_checkbox':
yield {
"text": text,
"class_attrib": mark_safe(' class="action-checkbox-column"'),
"sortable": False,
}
continue
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
# Not sortable
yield {
"text": text,
"sortable": False,
}
continue
# OK, it is sortable if we got this far
th_classes = ['sortable']
order_type = ''
new_order_type = 'asc'
sort_priority = 0
sorted = False
# Is it currently being sorted on?
if i in ordering_field_columns:
sorted = True
order_type = ordering_field_columns.get(i).lower()
sort_priority = list(ordering_field_columns).index(i) + 1
th_classes.append('sorted %sending' % order_type)
new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]
# build new ordering param
o_list_primary = [] # URL for making this field the primary sort
o_list_remove = [] # URL for removing this field from sort
o_list_toggle = [] # URL for toggling order type for this field
make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)
for j, ot in ordering_field_columns.items():
if j == i: # Same column
param = make_qs_param(new_order_type, j)
# We want clicking on this header to bring the ordering to the
# front
o_list_primary.insert(0, param)
o_list_toggle.append(param)
# o_list_remove - omit
else:
param = make_qs_param(ot, j)
o_list_primary.append(param)
o_list_toggle.append(param)
o_list_remove.append(param)
if i not in ordering_field_columns:
o_list_primary.insert(0, make_qs_param(new_order_type, i))
yield {
"text": text,
"sortable": True,
"sorted": sorted,
"ascending": order_type == "asc",
"sort_priority": sort_priority,
"url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
"url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
"url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
"class_attrib": format_html(' class="{0}"', ' '.join(th_classes))
if th_classes else '',
}
def _boolean_icon(field_val):
icon_url = static('admin/img/icon-%s.gif' %
{True: 'yes', False: 'no', None: 'unknown'}[field_val])
return format_html('<img src="{0}" alt="{1}" />', icon_url, field_val)
def items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == 'action_checkbox':
row_class = mark_safe(' class="action-checkbox"')
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = display_for_value(value, boolean)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if allow_tags:
result_repr = mark_safe(result_repr)
if isinstance(value, (datetime.date, datetime.time)):
row_class = mark_safe(' class="nowrap"')
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_class = mark_safe(' class="nowrap"')
if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_text(value))[1:]
yield format_html('<{0}{1}><a href="{2}"{3}>{4}</a></{5}>',
table_tag,
row_class,
url,
format_html(' onclick="opener.dismissRelatedLookupPopup(window, {0}); return false;"', result_id)
if cl.is_popup else '',
result_repr,
table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
yield format_html('<td{0}>{1}</td>', row_class, result_repr)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{0}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
# Wrapper class used to return items in a list_editable
# changelist, annotated with the form object for error
# reporting purposes. Needed to maintain backwards
# compatibility with existing admin templates.
def __init__(self, form, *items):
self.form = form
super(ResultList, self).__init__(*items)
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield ResultList(form, items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
if form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(force_text(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
"""
Displays the date hierarchy for date drill-down functionality.
"""
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
link = lambda d: cl.get_query_string(d, [field_generic])
if not (year_lookup or month_lookup or day_lookup):
# select appropriate start level
date_range = cl.query_set.aggregate(first=models.Min(field_name),
last=models.Max(field_name))
if date_range['first'] and date_range['last']:
if date_range['first'].year == date_range['last'].year:
year_lookup = date_range['first'].year
if date_range['first'].month == date_range['last'].month:
month_lookup = date_range['first'].month
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
},
'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': str(year_lookup)
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: str(year.year)}),
'title': str(year.year),
} for year in years]
}
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
"""
Displays a search form for searching the list.
"""
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
@register.simple_tag
def admin_list_filter(cl, spec):
tpl = get_template(spec.template)
return tpl.render(Context({
'title': spec.title,
'choices' : list(spec.choices(cl)),
'spec': spec,
}))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
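# A minimal usage sketch: these template tags are loaded and rendered by the
# admin's change_list.html template, roughly as in the illustrative excerpt
# below (the excerpt is not part of this module):
#
#     {% load admin_list %}
#     {% search_form cl %}
#     {% date_hierarchy cl %}
#     {% admin_actions %}
#     {% result_list cl %}
#     {% pagination cl %}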
|
tcwicklund/django
|
refs/heads/master
|
django/contrib/postgres/forms/ranges.py
|
393
|
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
lower_base, upper_base = self.fields
if isinstance(value, self.range_type):
return [
lower_base.prepare_value(value.lower),
upper_base.prepare_value(value.upper),
]
if value is None:
return [
lower_base.prepare_value(None),
upper_base.prepare_value(None),
]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
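# A minimal usage sketch (assumes a configured Django project with psycopg2
# installed; the form and field names below are illustrative):
#
#     class EventForm(forms.Form):
#         seats = IntegerRangeField()
#
#     form = EventForm(data={'seats_0': '1', 'seats_1': '10'})
#     form.is_valid()                 # True
#     form.cleaned_data['seats']      # NumericRange(1, 10)
#
# RangeWidget renders the two sub-widgets with the usual MultiWidget '_0'/'_1'
# name suffixes, and compress() rebuilds the psycopg2 range object from them.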
|
skinkie/SleekXMPP--XEP-0080-
|
refs/heads/master
|
sleekxmpp/stanza/iq.py
|
1
|
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from sleekxmpp.stanza import Error
from sleekxmpp.stanza.rootstanza import RootStanza
from sleekxmpp.xmlstream import RESPONSE_TIMEOUT, StanzaBase, ET
from sleekxmpp.xmlstream.handler import Waiter
from sleekxmpp.xmlstream.matcher import MatcherId
class Iq(RootStanza):
"""
XMPP <iq> stanzas, or info/query stanzas, are XMPP's method of
requesting and modifying information, similar to HTTP's GET and
POST methods.
Each <iq> stanza must have an 'id' value which associates the
stanza with the response stanza. XMPP entities must always
be given a response <iq> stanza with a type of 'result' after
sending a stanza of type 'get' or 'set'.
    Most use cases for <iq> stanzas will involve adding a <query>
element whose namespace indicates the type of information
desired. However, some custom XMPP applications use <iq> stanzas
as a carrier stanza for an application-specific protocol instead.
Example <iq> Stanzas:
<iq to="user@example.com" type="get" id="314">
<query xmlns="http://jabber.org/protocol/disco#items" />
</iq>
<iq to="user@localhost" type="result" id="17">
<query xmlns='jabber:iq:roster'>
<item jid='otheruser@example.net'
name='John Doe'
subscription='both'>
<group>Friends</group>
</item>
</query>
</iq>
Stanza Interface:
query -- The namespace of the <query> element if one exists.
Attributes:
types -- May be one of: get, set, result, or error.
Methods:
__init__ -- Overrides StanzaBase.__init__.
unhandled -- Send error if there are no handlers.
set_payload -- Overrides StanzaBase.set_payload.
set_query -- Add or modify a <query> element.
get_query -- Return the namespace of the <query> element.
del_query -- Remove the <query> element.
reply -- Overrides StanzaBase.reply
send -- Overrides StanzaBase.send
"""
namespace = 'jabber:client'
name = 'iq'
interfaces = set(('type', 'to', 'from', 'id', 'query'))
types = set(('get', 'result', 'set', 'error'))
plugin_attrib = name
def __init__(self, *args, **kwargs):
"""
Initialize a new <iq> stanza with an 'id' value.
Overrides StanzaBase.__init__.
"""
StanzaBase.__init__(self, *args, **kwargs)
# To comply with PEP8, method names now use underscores.
# Deprecated method names are re-mapped for backwards compatibility.
self.setPayload = self.set_payload
self.getQuery = self.get_query
self.setQuery = self.set_query
self.delQuery = self.del_query
if self['id'] == '':
if self.stream is not None:
self['id'] = self.stream.getNewId()
else:
self['id'] = '0'
def unhandled(self):
"""
Send a feature-not-implemented error if the stanza is not handled.
Overrides StanzaBase.unhandled.
"""
if self['type'] in ('get', 'set'):
self.reply()
self['error']['condition'] = 'feature-not-implemented'
self['error']['text'] = 'No handlers registered for this request.'
self.send()
def set_payload(self, value):
"""
Set the XML contents of the <iq> stanza.
Arguments:
value -- An XML object to use as the <iq> stanza's contents
"""
self.clear()
StanzaBase.set_payload(self, value)
return self
def set_query(self, value):
"""
Add or modify a <query> element.
Query elements are differentiated by their namespace.
Arguments:
value -- The namespace of the <query> element.
"""
query = self.xml.find("{%s}query" % value)
if query is None and value:
self.clear()
query = ET.Element("{%s}query" % value)
self.xml.append(query)
return self
def get_query(self):
"""Return the namespace of the <query> element."""
for child in self.xml.getchildren():
if child.tag.endswith('query'):
ns = child.tag.split('}')[0]
if '{' in ns:
ns = ns[1:]
return ns
return ''
def del_query(self):
"""Remove the <query> element."""
for child in self.xml.getchildren():
if child.tag.endswith('query'):
self.xml.remove(child)
return self
def reply(self):
"""
Send a reply <iq> stanza.
Overrides StanzaBase.reply
Sets the 'type' to 'result' in addition to the default
StanzaBase.reply behavior.
"""
self['type'] = 'result'
StanzaBase.reply(self)
return self
def send(self, block=True, timeout=RESPONSE_TIMEOUT):
"""
Send an <iq> stanza over the XML stream.
The send call can optionally block until a response is received or
a timeout occurs. Be aware that using blocking in non-threaded event
handlers can drastically impact performance.
Overrides StanzaBase.send
Arguments:
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
"""
if block and self['type'] in ('get', 'set'):
waitfor = Waiter('IqWait_%s' % self['id'], MatcherId(self['id']))
self.stream.registerHandler(waitfor)
StanzaBase.send(self)
return waitfor.wait(timeout)
else:
return StanzaBase.send(self)
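# A minimal usage sketch (assumes 'xmpp' is a connected SleekXMPP client; the
# JID and query namespace below are illustrative):
#
#     iq = xmpp.Iq()
#     iq['to'] = 'user@example.com'
#     iq['type'] = 'get'
#     iq['query'] = 'http://jabber.org/protocol/disco#items'
#     result = iq.send()   # blocks until the matching 'result'/'error' stanza or a timeout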
|
glmcdona/meddle
|
refs/heads/master
|
examples/base/Lib/distutils/extension.py
|
250
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
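# A minimal, self-contained sketch (constructs an Extension the way a setup.py
# would before passing it to setup(ext_modules=[...]); the module and file
# names below are illustrative):
if __name__ == "__main__":
    demo = Extension('demo',
                     sources=['demo.c'],
                     define_macros=[('NDEBUG', None)],
                     libraries=['m'])
    assert demo.name == 'demo' and demo.sources == ['demo.c']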
|
ImageEngine/gaffer
|
refs/heads/master
|
python/GafferUITest/WindowTest.py
|
5
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import warnings
import weakref
import imath
import IECore
import Gaffer
import GafferUI
import GafferUITest
from Qt import QtGui
from Qt import QtWidgets
class TestWidget( GafferUI.Widget ) :
def __init__( self ) :
GafferUI.Widget.__init__( self, QtWidgets.QLabel( "hello" ) )
class WindowTest( GafferUITest.TestCase ) :
def testTitle( self ) :
w = GafferUI.Window()
self.assertEqual( w.getTitle(), "GafferUI.Window" )
w = GafferUI.Window( "myTitle" )
self.assertEqual( w.getTitle(), "myTitle" )
w.setTitle( "myOtherTitle" )
self.assertEqual( w.getTitle(), "myOtherTitle" )
def testChild( self ) :
w = GafferUI.Window()
self.assertEqual( w.getChild(), None )
w.setChild( TestWidget() )
self.assertIsNotNone( w.getChild() )
self.assertIsInstance( w.getChild(), TestWidget )
t = TestWidget()
w.setChild( t )
self.assertTrue( w.getChild() is t )
self.assertTrue( w.getChild()._qtWidget() is t._qtWidget() )
self.assertTrue( t.parent() is w )
w.setChild( None )
self.assertIsNone( w.getChild() )
self.assertIsNone( t.parent() )
def testReparent( self ) :
w1 = GafferUI.Window()
w2 = GafferUI.Window()
t = TestWidget()
w1.setChild( t )
self.assertTrue( t.parent() is w1 )
self.assertTrue( w1.getChild() is t )
self.assertIsNone( w2.getChild() )
self.assertTrue( GafferUI.Widget._owner( t._qtWidget() ) is t )
w2.setChild( t )
self.assertTrue( t.parent() is w2 )
self.assertIsNone( w1.getChild() )
self.assertTrue( w2.getChild() is t )
self.assertTrue( GafferUI.Widget._owner( t._qtWidget() ) is t )
def testWindowParent( self ) :
parentWindow1 = GafferUI.Window()
parentWindow2 = GafferUI.Window()
childWindow = GafferUI.Window()
childWindowWeakRef = weakref.ref( childWindow )
self.assertIsNone( parentWindow1.parent() )
self.assertIsNone( parentWindow2.parent() )
self.assertIsNone( childWindow.parent() )
parentWindow1.addChildWindow( childWindow )
self.assertIsNone( parentWindow1.parent() )
self.assertIsNone( parentWindow2.parent() )
self.assertTrue( childWindow.parent() is parentWindow1 )
parentWindow1.setVisible( True )
childWindow.setVisible( True )
self.waitForIdle( 1000 )
parentWindow2.addChildWindow( childWindow )
self.assertIsNone( parentWindow1.parent() )
self.assertIsNone( parentWindow2.parent() )
self.assertTrue( childWindow.parent() is parentWindow2 )
parentWindow2.setVisible( True )
self.waitForIdle( 1000 )
parentWindow2.removeChild( childWindow )
self.assertIsNone( parentWindow1.parent() )
self.assertIsNone( parentWindow2.parent() )
self.assertIsNone( childWindow.parent() )
self.waitForIdle( 1000 )
parentWindow1.addChildWindow( childWindow )
self.assertTrue( childWindow.parent() is parentWindow1 )
self.waitForIdle( 1000 )
parentWindow1.removeChild( childWindow )
del childWindow
self.assertIsNone( childWindowWeakRef() )
def testWindowHoldsReferenceToChildWindows( self ) :
parentWindow = GafferUI.Window()
childWindow = GafferUI.Window()
childWindowWeakRef = weakref.ref( childWindow )
parentWindow.addChildWindow( childWindow )
del childWindow
self.assertIsNotNone( childWindowWeakRef() )
del parentWindow
self.assertIsNone( childWindowWeakRef() )
def testCloseMethod( self ) :
self.__windowWasClosed = 0
def closeFn( w ) :
assert( isinstance( w, GafferUI.Window ) )
self.__windowWasClosed += 1
w = GafferUI.Window()
w.setVisible( True )
self.assertEqual( w.getVisible(), True )
c = w.closedSignal().connect( closeFn )
self.assertEqual( w.close(), True )
self.assertEqual( w.getVisible(), False )
self.assertEqual( self.__windowWasClosed, 1 )
def testUserCloseAction( self ) :
self.__windowWasClosed = 0
def closeFn( w ) :
assert( isinstance( w, GafferUI.Window ) )
self.__windowWasClosed += 1
w = GafferUI.Window()
w.setVisible( True )
self.assertEqual( w.getVisible(), True )
c = w.closedSignal().connect( closeFn )
# simulate user clicking on the x
w._qtWidget().close()
self.assertEqual( w.getVisible(), False )
self.assertEqual( self.__windowWasClosed, 1 )
def testCloseDenial( self ) :
self.__windowWasClosed = 0
def closeFn( w ) :
assert( isinstance( w, GafferUI.Window ) )
self.__windowWasClosed += 1
class TestWindow( GafferUI.Window ) :
def __init__( self ) :
GafferUI.Window.__init__( self )
def _acceptsClose( self ) :
return False
w = TestWindow()
w.setVisible( True )
self.assertEqual( w.getVisible(), True )
c = w.closedSignal().connect( closeFn )
self.assertEqual( w.close(), False )
self.assertEqual( w.getVisible(), True )
self.assertEqual( self.__windowWasClosed, 0 )
# simulate user clicking on the x
w._qtWidget().close()
self.assertEqual( w.getVisible(), True )
self.assertEqual( self.__windowWasClosed, 0 )
def testAutomaticParenting( self ) :
with GafferUI.Window() as w :
d = GafferUI.Window()
f = GafferUI.Frame()
# should only accept one child
self.assertRaises( Exception, GafferUI.Frame )
# should accept any number of child windows though
d2 = GafferUI.Window()
self.assertTrue( d.parent() is w )
self.assertTrue( f.parent() is w )
self.assertTrue( d2.parent() is w )
def testSizeMode( self ) :
w = GafferUI.Window()
self.assertEqual( w.getSizeMode(), w.SizeMode.Manual )
w = GafferUI.Window( sizeMode=GafferUI.Window.SizeMode.Fixed )
self.assertEqual( w.getSizeMode(), w.SizeMode.Fixed )
w.setSizeMode( GafferUI.Window.SizeMode.Automatic )
self.assertEqual( w.getSizeMode(), w.SizeMode.Automatic )
def testResizeable( self ) :
# The methods we are testing are deprecated, so we must
# ignore the deprecation warnings they emit, as otherwise
# they would become exceptions.
with warnings.catch_warnings() :
warnings.simplefilter( "ignore", DeprecationWarning )
w = GafferUI.Window()
self.assertTrue( w.getResizeable() )
w.setResizeable( False )
self.assertFalse( w.getResizeable() )
w.setResizeable( True )
self.assertTrue( w.getResizeable() )
def testPosition( self ) :
w = GafferUI.Window()
w._qtWidget().resize( 200, 100 )
self.assertEqual( ( w._qtWidget().width(), w._qtWidget().height() ), ( 200, 100 ) )
w.setPosition( imath.V2i( 20, 30 ) )
self.assertEqual( w.getPosition(), imath.V2i( 20, 30 ) )
desktop = QtWidgets.QApplication.desktop()
screenRect = desktop.availableGeometry( w._qtWidget() )
windowRect = w._qtWidget().frameGeometry()
# Smaller, off-screen bottom right
w.setPosition( imath.V2i( screenRect.right() - 50, screenRect.bottom() - 75 ) )
self.assertEqual(
w.getPosition(),
imath.V2i(
screenRect.right() - windowRect.width() + 1,
screenRect.bottom() - windowRect.height() + 1
)
)
# Smaller, off-screen top left
w.setPosition( imath.V2i( screenRect.left() - 25 , screenRect.top() - 15 ) )
self.assertEqual( w.getPosition(), imath.V2i( screenRect.left(), screenRect.top() ) )
# Bigger width only
w._qtWidget().resize( screenRect.width() + 300, 200 )
windowRect = w._qtWidget().frameGeometry()
w.setPosition( imath.V2i( 100, 100 ) )
self.assertEqual( w.getPosition(), imath.V2i( screenRect.left(), 100 ) )
self.assertEqual( w._qtWidget().frameGeometry().size(), windowRect.size() )
# Bigger
w._qtWidget().resize( screenRect.width() + 300, screenRect.height() + 200 )
windowRect = w._qtWidget().frameGeometry()
w.setPosition( imath.V2i( 100, 100 ) )
self.assertEqual( w.getPosition(), imath.V2i( screenRect.left(), screenRect.top() ) )
self.assertEqual( w._qtWidget().frameGeometry().size(), windowRect.size() )
# Force position
w.setPosition( imath.V2i( 100, 100 ), forcePosition = True )
self.assertEqual( w.getPosition(), imath.V2i( 100, 100 ) )
self.assertEqual( w._qtWidget().frameGeometry().size(), windowRect.size() )
def testChildWindowsMethod( self ) :
w = GafferUI.Window()
self.assertEqual( w.childWindows(), [] )
wc1 = GafferUI.Window()
w.addChildWindow( wc1 )
self.assertEqual( w.childWindows(), [ wc1 ] )
wc2 = GafferUI.Window()
w.addChildWindow( wc2 )
self.assertEqual( len( w.childWindows() ), 2 )
self.assertIn( wc1, w.childWindows() )
self.assertIn( wc2, w.childWindows() )
c = w.childWindows()
c.remove( wc1 )
# editing the list itself should have no effect
self.assertEqual( len( w.childWindows() ), 2 )
self.assertIn( wc1, w.childWindows() )
self.assertIn( wc2, w.childWindows() )
w.removeChild( wc1 )
self.assertEqual( w.childWindows(), [ wc2 ] )
def testRemoveChildWindowOnClose( self ) :
# removeOnClose == False
parent = GafferUI.Window()
child = GafferUI.Window()
parent.addChildWindow( child )
parent.setVisible( True )
child.setVisible( True )
child.close()
self.waitForIdle()
self.assertTrue( child in parent.childWindows() )
# removeOnClose == True
parent = GafferUI.Window()
child = GafferUI.Window()
parent.addChildWindow( child, removeOnClose = True )
parent.setVisible( True )
child.setVisible( True )
child.close()
self.waitForIdle()
self.assertFalse( child in parent.childWindows() )
w = weakref.ref( child )
del child
self.assertEqual( w(), None )
def testRemoveOnCloseCrash( self ) :
parent = GafferUI.Window()
parent.setChild( GafferUI.Label( "Hello" ) )
parent.setVisible( True )
for i in range( 0, 50 ) :
child = GafferUI.Window()
child.setChild( GafferUI.Label( "World" ) )
parent.addChildWindow( child, removeOnClose = True )
child.setVisible( True )
self.waitForIdle()
qWindow = child._qtWidget().windowHandle()
weakChild = weakref.ref( child )
del child
# Simulate a click on the close button of the QWindow for the child
# window. This ripples down to the close handling in GafferUI.Window,
# and should remove the child window cleanly.
QtWidgets.QApplication.sendEvent( qWindow, QtGui.QCloseEvent() )
self.waitForIdle( 1000 )
self.assertEqual( parent.childWindows(), [] )
self.assertEqual( weakChild(), None )
if __name__ == "__main__":
unittest.main()
|
vanzhiganov/flask-blog
|
refs/heads/master
|
mdx_code_multiline.py
|
19
|
from markdown import Extension
from markdown.util import etree
from markdown.inlinepatterns import Pattern
RE = r'\[code\](.*?)\[\/code\]'
class MultilineCodeExtension(Extension):
def extendMarkdown(self, md, md_globals):
element = NestedElements(RE)
md.inlinePatterns.add('pre', element, '<not_strong')
class NestedElements(Pattern):
def handleMatch(self, m):
el1 = etree.Element('pre')
el2 = etree.SubElement(el1, 'cite')
el2.text = m.group(2).strip()
return el1
def makeExtension(configs=None):
return MultilineCodeExtension(configs=configs)
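# A minimal usage sketch (assumes a Markdown 2.x install, which matches the
# `md_globals` signature and `inlinePatterns.add` API used above):
#
#     import markdown
#     html = markdown.markdown('[code]print 1[/code]',
#                              extensions=[MultilineCodeExtension()])
#     # -> roughly '<p><pre><cite>print 1</cite></pre></p>'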
|
dhondta/tinyscript
|
refs/heads/master
|
tinyscript/preimports/ncodecs/barbie.py
|
1
|
# -*- coding: UTF-8 -*-
"""Barbie typewriter Codec - barbie content encoding.
While Barbie typewriter is more a cipher, its very limited key size of 2 bits
makes it easy to turn into four variants of the same encoding.
This codec:
- en/decodes strings from str to str
- en/decodes strings from bytes to bytes
- decodes file content to str (read)
- encodes file content from str to bytes (write)
Reference: http://www.cryptomuseum.com/crypto/mehano/barbie/
"""
from ._utils import *
REPLACE_CHAR = "?"
STD = [
"abcdefghijklmnopqrstuvABCDEFGHIJKLMNOPQRSTUVWXYZ0123456 \n\t",
"icolapxstvybjeruknfhqg>FAUTCYOLVJDZINQKSEHG<.1PB5234067 \n\t",
"torbiudfhgzcvanqyepskxRC>GHAPND<VUBLIKJETOYXM2QF6340578 \n\t",
"hrnctqlpsxwogiekzaufydSARYO>QIUX<GFDLJVTHNP1Z3KC7405689 \n\t",
"sneohkbufd;rxtaywiqpzlE>SPNRKLG1XYCUDV<HOIQ2B4JA805679- \n\t",
]
SPEC = [
"w x y z 7 8 9 - \' ! \" # % & ( ) * , . ¨ / : ; ? @ ^ _ + < = > ¢ £ § €",
"; d z w 8 9 - ¨ _ & m @ : \" * ( # W M § ^ , ¢ / ? ! ) % X \' R + € £ =",
"¢ l w ; 9 - ¨ § ) \" j ? , m # * @ . Z £ ! W + ^ / & ( : 1 _ S % = € \'",
"+ b ; ¢ - ¨ § £ ( m v / W j @ # ? M B € & . % ! ^ \" * , 2 ) E : \' = _",
"% c ¢ + ¨ § £ € * j g ^ . v ? @ / Z F = \" N : & ! m # W 3 ( T , _ \' )",
]
class BarbieError(ValueError):
pass
class BarbieDecodeError(BarbieError):
pass
class BarbieEncodeError(BarbieError):
pass
def barbie_encode(code):
def encode(text, errors="strict"):
i = int(code)
std0, stdn = [c for c in STD[0]], [c for c in STD[i]]
spec0, specn = SPEC[0].split(), SPEC[i].split()
mapping = {d: e for d, e in zip(std0 + spec0, stdn + specn)}
r = ""
for i, c in enumerate(ensure_str(text)):
try:
r += mapping[c]
except KeyError:
if errors == "strict":
raise BarbieEncodeError("'barbie' codec can't encode"
" character '{}' in position {}"
.format(c, i))
elif errors == "replace":
r += REPLACE_CHAR
elif errors == "ignore":
continue
else:
raise ValueError("Unsupported error handling {}"
.format(errors))
return r, len(text)
return encode
def barbie_decode(code):
def decode(text, errors="strict"):
i = int(code)
std0, stdn = [c for c in STD[0]], [c for c in STD[i]]
spec0, specn = SPEC[0].split(), SPEC[i].split()
mapping = {e: d for d, e in zip(std0 + spec0, stdn + specn)}
r = ""
for i, c in enumerate(ensure_str(text)):
try:
r += mapping[c]
except KeyError:
if errors == "strict":
raise BarbieDecodeError("'barbie' codec can't decode"
" character '{}' in position {}"
.format(c, i))
elif errors == "replace":
r += REPLACE_CHAR
elif errors == "ignore":
continue
else:
raise ValueError("Unsupported error handling {}"
.format(errors))
return r, len(text)
return decode
# note: the integer suffix after "barbie" is captured by the regex below and
#  passed to the parametrizable encode/decode factories barbie_encode and barbie_decode
codecs.add_codec("barbie", barbie_encode, barbie_decode, r"barbie[-_]?([1-4])$")
|
furf/pledge_service
|
refs/heads/master
|
lib/requests/packages/chardet/euctwfreq.py
|
3132
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http:#www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table ,
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
# Everything below is of no interest for detection purposes
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
cpython/279_sample_doctest.py
|
229
|
"""This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
"""
>>> 2+2
5
>>> 2+2
4
"""
def bar():
"""
>>> 2+2
4
"""
def test_silly_setup():
"""
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
True
"""
def w_blank():
"""
>>> if 1:
... print 'a'
... print
... print 'b'
a
<BLANKLINE>
b
"""
x = 1
def x_is_one():
"""
>>> x
1
"""
def y_is_one():
"""
>>> y
1
"""
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
import doctest
return doctest.DocTestSuite()
|
scalable-networks/gnuradio-3.7.2.1
|
refs/heads/master
|
gr-digital/python/digital/qa_correlate_access_code.py
|
10
|
#!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
default_access_code = '\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC'
def string_to_1_0_list(s):
r = []
for ch in s:
x = ord(ch)
for i in range(8):
t = (x >> i) & 0x1
r.append(t)
return r
def to_1_0_string(L):
return ''.join(map(lambda x: chr(x + ord('0')), L))
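# Illustrative sketch (not part of the original GNU Radio tests): the two helpers
# above expand a byte string into its bits, least-significant bit first, and join a
# bit list back into a '1'/'0' string, e.g.
#   string_to_1_0_list('\x01') -> [1, 0, 0, 0, 0, 0, 0, 0]
#   to_1_0_string([1, 0, 1, 1]) -> '1011'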
class test_correlate_access_code(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001(self):
pad = (0,) * 64
# 0 0 0 1 0 0 0 1
src_data = (1, 0, 1, 1, 1, 1, 0, 1, 1) + pad + (0,) * 7
expected_result = pad + (1, 0, 1, 1, 3, 1, 0, 1, 1, 2) + (0,) * 6
src = blocks.vector_source_b(src_data)
op = digital.correlate_access_code_bb("1011", 0)
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_002(self):
code = tuple(string_to_1_0_list(default_access_code))
access_code = to_1_0_string(code)
pad = (0,) * 64
#print code
#print access_code
src_data = code + (1, 0, 1, 1) + pad
expected_result = pad + code + (3, 0, 1, 1)
src = blocks.vector_source_b(src_data)
op = digital.correlate_access_code_bb(access_code, 0)
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def test_003(self):
code = tuple(string_to_1_0_list(default_access_code))
access_code = to_1_0_string(code)
pad = (0,) * 64
#print code
#print access_code
src_data = code + (1, 0, 1, 1) + pad
expected_result = code + (1, 0, 1, 1) + pad
src = blocks.vector_source_b(src_data)
op = digital.correlate_access_code_tag_bb(access_code, 0, "test")
dst = blocks.vector_sink_b()
self.tb.connect(src, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_correlate_access_code, "test_correlate_access_code.xml")
|
loopCM/chromium
|
refs/heads/trunk
|
third_party/mesa/MesaLib/src/gallium/docs/source/exts/formatting.py
|
52
|
# formatting.py
# Sphinx extension providing formatting for Gallium-specific data
# (c) Corbin Simpson 2010
# Public domain to the extent permitted; contact author for special licensing
import docutils.nodes
import sphinx.addnodes
def parse_envvar(env, sig, signode):
envvar, t, default = sig.split(" ", 2)
envvar = envvar.strip().upper()
t = " Type: %s" % t.strip(" <>").lower()
default = " Default: %s" % default.strip(" ()")
signode += sphinx.addnodes.desc_name(envvar, envvar)
signode += sphinx.addnodes.desc_type(t, t)
signode += sphinx.addnodes.desc_annotation(default, default)
return envvar
def parse_opcode(env, sig, signode):
opcode, desc = sig.split("-", 1)
opcode = opcode.strip().upper()
desc = " (%s)" % desc.strip()
signode += sphinx.addnodes.desc_name(opcode, opcode)
signode += sphinx.addnodes.desc_annotation(desc, desc)
return opcode
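# Illustrative sketch (assumed reST usage, not taken verbatim from the Gallium docs):
# the two parsers above expect directive signatures shaped like
#   .. envvar:: GALLIUM_HUD <string> (off)
#   .. opcode:: MOV - move
# parse_envvar() splits the signature into name, "<type>" and "(default)" parts;
# parse_opcode() splits on the first "-" into opcode name and description.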
def setup(app):
app.add_description_unit("envvar", "envvar", "%s (environment variable)",
parse_envvar)
app.add_description_unit("opcode", "opcode", "%s (TGSI opcode)",
parse_opcode)
|
paplorinc/intellij-community
|
refs/heads/master
|
python/testData/intentions/oneLineDocStub.py
|
83
|
def fo<caret>o(a, b): pass
|
bitifirefly/edx-platform
|
refs/heads/master
|
lms/djangoapps/open_ended_grading/staff_grading.py
|
192
|
"""
LMS part of instructor grading:
- views + ajax handling
- calls the instructor grading service
"""
import logging
log = logging.getLogger(__name__)
class StaffGrading(object):
"""
Wrap up functionality for staff grading of submissions--interface exposes get_html, ajax views.
"""
def __init__(self, course):
self.course = course
def get_html(self):
return "<b>Instructor grading!</b>"
# context = {}
# return render_to_string('courseware/instructor_grading_view.html', context)
|
sanghinitin/golismero
|
refs/heads/master
|
thirdparty_libs/shodan/export.py
|
7
|
import sys
from datetime import datetime
from xml.sax import make_parser, handler
# Type conversion helper functions
def parse_date(args):
return datetime.strptime(args, '%d.%m.%Y')
class ExportSaxParser(handler.ContentHandler):
"""Parses Shodan's export XML file and executes the callback for each
entry.
"""
# Callbacks
entry_cb = None
# Keep track of where we're at
_in_host = False
_in_data = False
_host = None
_data = u''
# Conversion schemas
_host_attr_schema = {
'port': int,
'updated': parse_date,
}
def __init__(self, entry_cb=None):
# Define the callbacks
self.entry_cb = entry_cb
# ContentHandler methods
def startElement(self, name, attrs):
if name =='host':
# Extract all the attribute information
self._host = {}
for (name, value) in attrs.items():
# Convert the field to a native type if it's defined in the schema
self._host[name] = self._host_attr_schema.get(name, lambda x: x)(value)
# Update the state machine
self._in_host = True
elif name == 'data':
self._in_data = True
self._data = u''
def endElement(self, name):
if name == 'host':
# Execute the callback
self.entry_cb(self._host)
# Update the state machine
self._in_host = False
elif name == 'data':
self._host['data'] = self._data
self._in_data = False
def characters(self, content):
if self._in_data:
self._data += content
class ExportParser(object):
entry_cb = None
def __init__(self, entry_cb=None):
self.entry_cb = entry_cb
def parse(self, filename):
parser = make_parser()
parser.setContentHandler(ExportSaxParser(self.entry_cb))
parser.parse(filename)
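# Illustrative sketch (hypothetical export snippet, not from a real Shodan dump): the
# SAX handler above turns an entry such as
#   <host ip="198.51.100.7" port="80" updated="01.01.2010"><data>HTTP/1.0 200 OK</data></host>
# into {'ip': u'198.51.100.7', 'port': 80, 'updated': datetime(2010, 1, 1, 0, 0),
# 'data': u'HTTP/1.0 200 OK'} and passes that dict to the entry_cb callback.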
if __name__ == '__main__':
def test_cb(entry):
print entry
import sys
parser = ExportParser(test_cb)
parser.parse(sys.argv[1])
|
yavuzovski/playground
|
refs/heads/master
|
python/django/RESTTest/.venv/lib/python3.4/site-packages/django/contrib/sessions/management/commands/clearsessions.py
|
729
|
from importlib import import_module
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = (
"Can be run as a cronjob or directly to clean out expired sessions "
"(only with the database backend at the moment)."
)
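    # Illustrative usage sketch (standard Django management-command invocation,
    # assumed rather than documented in this file):
    #   python manage.py clearsessions
    # With the database backend this removes expired Session rows; backends that do
    # not implement clear_expired() hit the NotImplementedError branch below instead.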
def handle(self, **options):
engine = import_module(settings.SESSION_ENGINE)
try:
engine.SessionStore.clear_expired()
except NotImplementedError:
self.stderr.write("Session engine '%s' doesn't support clearing "
"expired sessions.\n" % settings.SESSION_ENGINE)
|
jibe-b/searx
|
refs/heads/master
|
searx/engines/yahoo.py
|
2
|
"""
Yahoo (Web)
@website https://search.yahoo.com/web
@provide-api yes (https://developer.yahoo.com/boss/search/),
$0.80/1000 queries
@using-api no (because pricing)
@results HTML (using search portal)
@stable no (HTML can change)
@parse url, title, content, suggestion
"""
from lxml import html
from searx.engines.xpath import extract_text, extract_url
from searx.url_utils import unquote, urlencode
# engine dependent config
categories = ['general']
paging = True
language_support = True
time_range_support = True
# search-url
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
search_url_with_time = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}&age={age}&btf={btf}&fr2=time'
supported_languages_url = 'https://search.yahoo.com/web/advanced'
# specific xpath variables
results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[@class="compText aAbs"]'
suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"
time_range_dict = {'day': ['1d', 'd'],
'week': ['1w', 'w'],
'month': ['1m', 'm']}
# remove yahoo-specific tracking-url
def parse_url(url_string):
endings = ['/RS', '/RK']
endpositions = []
start = url_string.find('http', url_string.find('/RU=') + 1)
for ending in endings:
endpos = url_string.rfind(ending)
if endpos > -1:
endpositions.append(endpos)
if start == 0 or len(endpositions) == 0:
return url_string
else:
end = min(endpositions)
return unquote(url_string[start:end])
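# Illustrative sketch (hypothetical redirect URL, not captured from Yahoo): a result
# link such as
#   https://r.search.yahoo.com/_ylt=A0/RU=https%3a%2f%2fexample.org%2f/RK=2/RS=abc
# is unwrapped by locating the "http" that follows "/RU=", cutting at the earliest
# "/RK" or "/RS" marker and unquoting, which yields "https://example.org/".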
def _get_url(query, offset, language, time_range):
if time_range in time_range_dict:
return base_url + search_url_with_time.format(offset=offset,
query=urlencode({'p': query}),
lang=language,
age=time_range_dict[time_range][0],
btf=time_range_dict[time_range][1])
return base_url + search_url.format(offset=offset,
query=urlencode({'p': query}),
lang=language)
def _get_language(params):
if params['language'] == 'all':
return 'en'
elif params['language'][:2] == 'zh':
if params['language'] == 'zh' or params['language'] == 'zh-CH':
return 'szh'
else:
return 'tzh'
else:
return params['language'].split('-')[0]
# do search-request
def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict:
return params
offset = (params['pageno'] - 1) * 10 + 1
language = _get_language(params)
params['url'] = _get_url(query, offset, language, params['time_range'])
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
.format(lang=language)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
try:
results_num = int(dom.xpath('//div[@class="compPagination"]/span[last()]/text()')[0]
.split()[0].replace(',', ''))
results.append({'number_of_results': results_num})
except:
pass
# parse results
for result in dom.xpath(results_xpath):
try:
url = parse_url(extract_url(result.xpath(url_xpath), search_url))
title = extract_text(result.xpath(title_xpath)[0])
except:
continue
content = extract_text(result.xpath(content_xpath)[0])
# append result
results.append({'url': url,
'title': title,
'content': content})
# if no suggestion found, return results
suggestions = dom.xpath(suggestion_xpath)
if not suggestions:
return results
# parse suggestion
for suggestion in suggestions:
# append suggestion
results.append({'suggestion': extract_text(suggestion)})
# return results
return results
# get supported languages from their site
def _fetch_supported_languages(resp):
supported_languages = []
dom = html.fromstring(resp.text)
options = dom.xpath('//div[@id="yschlang"]/span/label/input')
for option in options:
code = option.xpath('./@value')[0][5:].replace('_', '-')
supported_languages.append(code)
return supported_languages
|
skywin/p2pool
|
refs/heads/master
|
p2pool/bitcoin/getwork.py
|
267
|
'''
Representation of a getwork request/reply
'''
from __future__ import division
from . import data as bitcoin_data
from . import sha256
from p2pool.util import pack
def _swap4(s):
if len(s) % 4:
raise ValueError()
return ''.join(s[x:x+4][::-1] for x in xrange(0, len(s), 4))
class BlockAttempt(object):
def __init__(self, version, previous_block, merkle_root, timestamp, bits, share_target):
self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target = version, previous_block, merkle_root, timestamp, bits, share_target
def __hash__(self):
return hash((self.version, self.previous_block, self.merkle_root, self.timestamp, self.bits, self.share_target))
def __eq__(self, other):
if not isinstance(other, BlockAttempt):
raise ValueError('comparisons only valid with other BlockAttempts')
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'BlockAttempt(%s)' % (', '.join('%s=%r' % (k, v) for k, v in self.__dict__.iteritems()),)
def getwork(self, **extra):
if 'data' in extra or 'hash1' in extra or 'target' in extra or 'midstate' in extra:
raise ValueError()
block_data = bitcoin_data.block_header_type.pack(dict(
version=self.version,
previous_block=self.previous_block,
merkle_root=self.merkle_root,
timestamp=self.timestamp,
bits=self.bits,
nonce=0,
))
getwork = {
'data': _swap4(block_data).encode('hex') + '000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000',
'hash1': '00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000',
'target': pack.IntType(256).pack(self.share_target).encode('hex'),
'midstate': _swap4(sha256.process(sha256.initial_state, block_data[:64])).encode('hex'),
}
getwork = dict(getwork)
getwork.update(extra)
return getwork
@classmethod
def from_getwork(cls, getwork):
attrs = decode_data(getwork['data'])
return cls(
version=attrs['version'],
previous_block=attrs['previous_block'],
merkle_root=attrs['merkle_root'],
timestamp=attrs['timestamp'],
bits=attrs['bits'],
share_target=pack.IntType(256).unpack(getwork['target'].decode('hex')),
)
def update(self, **kwargs):
d = self.__dict__.copy()
d.update(kwargs)
return self.__class__(**d)
def decode_data(data):
return bitcoin_data.block_header_type.unpack(_swap4(data.decode('hex'))[:80])
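# Illustrative sketch (assumed values, not real chain data): a small self-check of the
# helpers above, kept inside a function so importing the module stays side-effect free.
def _demo_getwork_helpers():  # hypothetical helper, for illustration only
    # _swap4 reverses every 4-byte group, so applying it twice restores the input.
    assert _swap4('abcdefgh') == 'dcbahgfe'
    assert _swap4(_swap4('abcdefgh')) == 'abcdefgh'
    # BlockAttempt.update() returns a modified copy and leaves the original untouched.
    a = BlockAttempt(2, 0, 0, 1000, 0x1d00ffff, 2 ** 256 // 1000)
    b = a.update(timestamp=1001)
    assert a.timestamp == 1000 and b.timestamp == 1001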
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/fortios/fortios_log_syslogd_override_filter.py
|
14
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_syslogd_override_filter
short_description: Override filters for remote system server in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_syslogd feature and override_filter category.
      Examples include all parameters; values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_syslogd_override_filter:
description:
- Override filters for remote system server.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- Syslog filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Lowest severity level to log.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Override filters for remote system server.
fortios_log_syslogd_override_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_syslogd_override_filter:
anomaly: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_log_syslogd_override_filter_data(json):
option_list = ['anomaly', 'dns', 'filter',
'filter_type', 'forward_traffic', 'gtp',
'local_traffic', 'multicast_traffic', 'netscan_discovery',
'netscan_vulnerability', 'severity', 'sniffer_traffic',
'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
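# Illustrative sketch (assumed input, not from a live FortiGate): underscore_to_hyphen
# recursively rewrites dictionary keys so the Ansible option names line up with the
# hyphenated FortiOS API field names, e.g.
#   underscore_to_hyphen({'filter_type': 'include', 'forward_traffic': 'enable'})
#   -> {'filter-type': 'include', 'forward-traffic': 'enable'}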
def log_syslogd_override_filter(data, fos):
vdom = data['vdom']
log_syslogd_override_filter_data = data['log_syslogd_override_filter']
filtered_data = underscore_to_hyphen(filter_log_syslogd_override_filter_data(log_syslogd_override_filter_data))
return fos.set('log.syslogd',
'override-filter',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_syslogd(data, fos):
if data['log_syslogd_override_filter']:
resp = log_syslogd_override_filter(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"log_syslogd_override_filter": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter_type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan_discovery": {"required": False, "type": "str"},
"netscan_vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_log_syslogd(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
GunoH/intellij-community
|
refs/heads/master
|
python/helpers/pockets/string.py
|
30
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 the Pockets team, see AUTHORS.
# Licensed under the BSD License, see LICENSE for details.
"""A pocket full of useful string manipulation functions!"""
from __future__ import absolute_import
import re
import six
import sys
from pockets.collections import listify
__all__ = ["camel", "uncamel", "splitcaps"]
# Default regular expression flags
if six.PY2:
_re_flags = re.L | re.M | re.U
else:
_re_flags = re.M | re.U
_whitespace_group_re = re.compile("(\s+)", _re_flags)
_uncamel_re = re.compile(
"(" # The whole expression is in a single group
# Clause 1
"(?<=[^\sA-Z])" # Preceded by neither a space nor a capital letter
"[A-Z]+[^a-z\s]*" # All non-lowercase beginning with a capital letter
"(?=[A-Z][^A-Z\s]*?[a-z]|\s|$)" # Followed by a capitalized word
"|"
# Clause 2
"(?<=[^\s])" # Preceded by a character that is not a space
"[A-Z][^A-Z\s]*?[a-z]+[^A-Z\s]*" # Capitalized word
")", _re_flags)
_splitcaps_re = re.compile(
# Clause 1
"[A-Z]+[^a-z]*" # All non-lowercase beginning with a capital letter
"(?=[A-Z][^A-Z]*?[a-z]|$)" # Followed by a capitalized word
"|"
# Clause 2
"[A-Z][^A-Z]*?[a-z]+[^A-Z]*" # Capitalized word
"|"
# Clause 3
"[^A-Z]+", # All non-uppercase
_re_flags)
def camel(s, sep="_", lower_initial=False, upper_segments=None,
preserve_upper=False):
"""Convert underscore_separated string (aka snake_case) to CamelCase.
Works on full sentences as well as individual words:
>>> camel("hello_world!")
'HelloWorld!'
>>> camel("Totally works as_expected, even_with_whitespace!")
'Totally Works AsExpected, EvenWithWhitespace!'
Args:
sep (string, optional): Delineates segments of `s` that will be
CamelCased. Defaults to an underscore "_".
For example, if you want to CamelCase a dash separated word:
>>> camel("xml-http-request", sep="-")
'XmlHttpRequest'
lower_initial (bool, int, or list, optional): If True, the initial
character of each camelCased word will be lowercase. If False, the
initial character of each CamelCased word will be uppercase.
Defaults to False:
>>> camel("http_request http_response")
'HttpRequest HttpResponse'
>>> camel("http_request http_response", lower_initial=True)
'httpRequest httpResponse'
Optionally, `lower_initial` can be an int or a list of ints,
indicating which individual segments of each CamelCased word
should start with a lowercase. Supports negative numbers to index
segments from the right:
>>> camel("xml_http_request", lower_initial=0)
'xmlHttpRequest'
>>> camel("xml_http_request", lower_initial=-1)
'XmlHttprequest'
>>> camel("xml_http_request", lower_initial=[0, 1])
'xmlhttpRequest'
upper_segments (int or list, optional): Indicates which segments of
CamelCased words should be fully uppercased, instead of just
capitalizing the first letter.
Can be an int, indicating a single segment, or a list of ints,
indicating multiple segments. Supports negative numbers to index
segments from the right.
`upper_segments` is helpful when dealing with acronyms:
>>> camel("tcp_socket_id", upper_segments=0)
'TCPSocketId'
>>> camel("tcp_socket_id", upper_segments=[0, -1])
'TCPSocketID'
>>> camel("tcp_socket_id", upper_segments=[0, -1], lower_initial=1)
'TCPsocketID'
preserve_upper (bool): If True, existing uppercase characters will
not be automatically lowercased. Defaults to False.
>>> camel("xml_HTTP_reQuest")
'XmlHttpRequest'
>>> camel("xml_HTTP_reQuest", preserve_upper=True)
'XmlHTTPReQuest'
Returns:
str: CamelCased version of `s`.
"""
if isinstance(lower_initial, bool):
lower_initial = [0] if lower_initial else []
else:
lower_initial = listify(lower_initial)
upper_segments = listify(upper_segments)
result = []
for word in _whitespace_group_re.split(s):
segments = [segment for segment in word.split(sep) if segment]
count = len(segments)
for i, segment in enumerate(segments):
upper = i in upper_segments or (i - count) in upper_segments
lower = i in lower_initial or (i - count) in lower_initial
if upper and lower:
if preserve_upper:
segment = segment[0] + segment[1:].upper()
else:
segment = segment[0].lower() + segment[1:].upper()
elif upper:
segment = segment.upper()
elif lower:
if not preserve_upper:
segment = segment.lower()
elif preserve_upper:
segment = segment[0].upper() + segment[1:]
else:
segment = segment[0].upper() + segment[1:].lower()
result.append(segment)
return "".join(result)
def uncamel(s, sep="_"):
"""Convert CamelCase string to underscore_separated (aka snake_case).
A CamelCase word is considered to be any uppercase letter followed by zero
or more lowercase letters. Contiguous groups of uppercase letters – like
you would find in an acronym – are also considered part of a single word:
>>> uncamel("Request")
'request'
>>> uncamel("HTTP")
'http'
>>> uncamel("HTTPRequest")
'http_request'
>>> uncamel("xmlHTTPRequest")
'xml_http_request'
Works on full sentences as well as individual words:
>>> uncamel("HelloWorld!")
'hello_world!'
>>> uncamel("Totally works AsExpected, EvenWithWhitespace!")
'totally works as_expected, even_with_whitespace!'
Args:
sep (str, optional): String used to separate CamelCase words. Defaults
to an underscore "_".
For example, if you want dash separated words:
>>> uncamel("XmlHttpRequest", sep="-")
'xml-http-request'
Returns:
str: uncamel_cased version of `s`.
"""
return _uncamel_re.sub(r'{0}\1'.format(sep), s).lower()
def splitcaps(s, pattern=None, maxsplit=None, flags=0):
"""Intelligently split a string on capitalized words.
A capitalized word is considered to be any uppercase letter followed by
zero or more lowercase letters. Contiguous groups of uppercase letters –
like you would find in an acronym – are also considered part of a single
word:
>>> splitcaps("Request")
['Request']
>>> splitcaps("HTTP")
['HTTP']
>>> splitcaps("HTTPRequest")
['HTTP', 'Request']
>>> splitcaps("HTTP/1.1Request")
['HTTP/1.1', 'Request']
>>> splitcaps("xmlHTTPRequest")
['xml', 'HTTP', 'Request']
If no capitalized words are found in `s`, the whole string is
returned in a single element list:
>>> splitcaps("")
['']
>>> splitcaps("lower case words")
['lower case words']
Does not split on whitespace by default. To also split
on whitespace, pass "\\\s+" for `pattern`:
>>> splitcaps("Without whiteSpace pattern")
['Without white', 'Space pattern']
>>> splitcaps("With whiteSpace pattern", pattern="\s+")
['With', 'white', 'Space', 'pattern']
>>> splitcaps("With whiteSpace group", pattern="(\s+)")
['With', ' ', 'white', 'Space', ' ', 'group']
Args:
s (str): The string to split.
pattern (str, optional): In addition to splitting on capital letters,
also split by the occurrences of `pattern`. If capturing
parentheses are used in `pattern`, then the text of all groups in
`pattern` are also returned as part of the resulting list.
Defaults to None.
maxsplit (int, optional): If maxsplit is not specified or -1, then
there is no limit on the number of splits (all possible splits are
made). If maxsplit is >= 0, at most maxsplit splits occur, and the
remainder of the string is returned as the final element of the
list.
flags (int, optional): Flags to pass to the regular expression created
using `pattern`. Ignored if `pattern` is not specified. Defaults
to (re.LOCALE | re.MULTILINE | re.UNICODE).
Returns:
list: List of capitalized substrings in `s`.
"""
if not maxsplit:
if maxsplit == 0:
return [s]
else:
maxsplit = -1
if pattern:
pattern_re = re.compile(pattern, flags or _re_flags)
else:
pattern_re = None
result = []
post_maxsplit = []
for m in _splitcaps_re.finditer(s):
if pattern_re:
for segment in pattern_re.split(m.group()):
if segment:
if maxsplit > 0 and len(result) >= maxsplit:
post_maxsplit.append(segment)
else:
result.append(segment)
else:
result.append(m.group())
if maxsplit > 0 and len(result) >= maxsplit:
if m.end() < len(s):
post_maxsplit.append(s[m.end():])
post_maxsplit = ''.join(post_maxsplit)
if post_maxsplit:
result.append(post_maxsplit)
break
return result if len(result) > 0 else [s]
class UnicodeMixin(object):
"""Mixin class to define the proper __str__/__unicode__ methods in
Python 2 or 3.
Originally found on the `Porting Python 2 Code to Python 3 HOWTO`_.
.. _Porting Python 2 Code to Python 3 HOWTO:
https://docs.python.org/3.3/howto/pyporting.html
"""
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
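# Illustrative sketch (hypothetical subclass, not part of pockets): a class only needs
# to define __unicode__; the mixin supplies a matching __str__ on both Python 2 and 3.
class _GreetingExample(UnicodeMixin):  # hypothetical name, for illustration only
    def __unicode__(self):
        return u'hello'  # str(_GreetingExample()) == 'hello' on either Python version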
|
tienfuc/gogoogle
|
refs/heads/master
|
go.py
|
1
|
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from urllib import quote_plus
import re, os, random
from time import sleep
from user import USER, PASSWORD, PROJECT, FOLDER, DEBUG, PHONE_NUMBER
import sys
def create_project(driver):
# wait for project creation
max_retries = 30
while True:
sleep(1)
try:
project_id = driver.find_element_by_css_selector("b[class=\"ng-binding\"]").text
if project_id:
print "Project creation done with id: %s" % project_id
return project_id
except:
pass
max_retries -= 1
if max_retries < 0:
raise Exception("Project creation failed with name: %s" % PROJECT)
def delay_send_keys(element, keys):
delay1 = round(random.uniform(1, 2), 2)
print "Delay %.2f after send_keys(): %s" % (delay1, keys)
for k in list(keys):
delay2 = round(random.uniform(0, 0.2), 2)
sleep(delay2)
element.send_keys(k)
sleep(delay1)
def spoof_click(driver):
total = random.randint(2,6)
count = 1
for i in range(0, total):
items = driver.find_elements_by_xpath("//dt[contains(@class, \"p6n-tree-node ng-scope ng-isolate-scope\")]/div/div/div/a/span[@class=\"ng-binding\"]/..")
for item in random.sample(items, 1):
attr = ""
try:
attr = item.get_attribute("pan-nav-tooltip") or item.get_attribute("title")
except:
print "Error: failed to get pan-nav-tooltip or title"
pass
else:
delay1 = round(random.uniform(0.3, 1), 2)
delay2 = round(random.uniform(0.3, 1), 2)
try:
span = item.find_element_by_xpath("span")
except:
print "Error: failed to get span"
pass
else:
if not span.is_displayed():
try:
top = item.find_element_by_xpath("../../../../../../../preceding-sibling::*[1]")
print "Delay %.2f after spoof top click: %s" % (delay1, top.text)
if top.is_displayed():
sleep(delay1)
top.click()
except:
print "Error: failed to find preceding-sibling and click"
pass
if span.is_displayed():
print "Delay %.2f after spoof click (%d of %d): %s" % (delay2, count, total, attr)
count += 1
sleep(delay2)
try:
span.click()
except:
print "Error: failed to click span"
pass
return
def delay_get(driver, url):
delay = round(random.uniform(3, 5), 2)
print "Delay %.2f after get(): %s" % (delay, url)
driver.get(url)
sleep(delay)
def delay_get_spoof(driver, url):
spoof_click(driver)
delay = round(random.uniform(3, 5), 2)
print "Delay %.2f after get(): %s" % (delay, url)
driver.get(url)
sleep(delay)
def delay_click(element):
delay1 = round(random.uniform(1.5, 1), 2)
delay2 = round(random.uniform(3, 5), 2)
text = element.text
if text == "":
text = element.get_attribute("value")
if text == "":
text = element.get_attribute("name")
if text == "":
text = element.get_attribute("id")
print "Click with delay %.2f %.2f: %s" % (delay1, delay2, text)
sleep(delay1)
element.click()
sleep(delay2)
def enable_api(driver, project_id, api):
base_url = "https://console.developers.google.com/project/%s/apiui/apiview/%s/overview"
for a in api:
url = base_url % (project_id, a)
delay_get_spoof(driver, url)
span = driver.find_element_by_xpath("//span[@class=\"p6n-loading-button-regular-text\"]")
print "Enable API: %s" % a
span.click()
def pass_phone_check1(driver):
# radio
print "pass_phone_check1"
radio = driver.find_element_by_xpath("//input[@id=\"PhoneVerificationChallenge\"]")
radio.click()
phone_number = driver.find_element_by_xpath("//input[@id=\"phoneNumber\"]")
phone_number.send_keys(PHONE_NUMBER)
submit = driver.find_element_by_xpath("//input[@id=\"submitChallenge\"]")
submit.click()
def pass_phone_check2(driver):
print "pass_phone_check2"
submit = driver.find_element_by_xpath("//input[@id=\"save\"]")
submit.click()
def run():
if not DEBUG:
global display
display = Display(visible=0, size=(800, 3200)) # extend the display to make more items accessible
display.start()
# download json credential file without ask
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2) # custom location
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', FOLDER)
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/json')
global driver
driver = webdriver.Firefox(profile)
driver.maximize_window()
url_console = "https://console.developers.google.com/project"
url_googlelogin = "https://accounts.google.com/ServiceLogin?continue=" + quote_plus(url_console)
delay_get(driver, url_googlelogin)
# login
delay_send_keys(driver.find_element_by_id("Email"), USER)
delay_send_keys(driver.find_element_by_id("Passwd"), PASSWORD)
delay_click(driver.find_element_by_id("signIn"))
# phone number check
try:
pass_phone_check1(driver)
except:
pass
try:
pass_phone_check2(driver)
except:
pass
if url_console != driver.current_url:
raise Exception("Login failed")
else:
print "Done: login"
try:
print "Trying: no-projects-create"
delay_click(driver.find_element_by_id("no-projects-create"))
except:
pass
try:
print "Trying: projects-create"
delay_click(driver.find_element_by_id("projects-create"))
except:
raise Exception("Both no-projects-create and projects-create failed")
#delay_click(driver.find_element_by_css_selector("span[id=\"tos-agree\"]"))
try:
agree = driver.find_element_by_css_selector("span[id=\"tos-agree\"]")
except:
pass
else:
delay_click(agree)
delay_send_keys(driver.find_element_by_name("name"), PROJECT)
delay_click(driver.find_element_by_name("ok"))
print "Creating project with name: %s" % PROJECT
new_project_id = create_project(driver)
# page consent
url_project = url_console + "/" + new_project_id + "/apiui/consent"
delay_get_spoof(driver, url_project)
# email
delay_click(driver.find_element_by_css_selector('div[class="goog-inline-block goog-flat-menu-button-caption"]'))
delay_click(driver.find_element_by_class_name("goog-menuitem"))
# product name
delay_send_keys(driver.find_element_by_name("displayName"), "application")
delay_click(driver.find_element_by_id("api-consent-save"))
print "Done: consent"
# page credential
url_credential = url_console + "/" + new_project_id + "/apiui/credential"
delay_get_spoof(driver, url_credential)
# Create new Client ID
delay_click(driver.find_element_by_css_selector('jfk-button[jfk-button-style="PRIMARY"]'))
# Installed application
txt_application = "Installed application"
label_application = driver.find_elements_by_css_selector('label')[2]
if txt_application in label_application.text:
delay_click(label_application)
else:
        raise Exception("label not found: %s" % txt_application)
# Other
txt_other = "Other"
label_other = driver.find_elements_by_css_selector('label')[7]
if txt_other in label_other.text:
delay_click(label_other)
else:
        raise Exception("label not found: %s" % txt_other)
# OK
delay_click(driver.find_element_by_name("ok"))
print "Done: credential"
# json URL
url_downloadjson = driver.find_element_by_css_selector('a[class="goog-inline-block jfk-button jfk-button-standard ng-scope"]').get_attribute("href")
# get json file
delay_get_spoof(driver, url_downloadjson)
s = re.search(r'clientId=([^&]+)', url_downloadjson, flags=re.I)
client_id = s.group(1)
credential_file = os.path.join(FOLDER, "client_secret_"+client_id+".json")
f = open(credential_file, 'rb')
if client_id in f.read():
# rename
new_name = os.path.join(FOLDER, USER+"="+new_project_id+".json")
if os.path.isfile(new_name):
os.remove(new_name)
os.rename(credential_file, new_name)
print "Done: credential json file: %s" % new_name
else:
raise Exception("Error: credential file invalid: %s" % credential_file)
# enable APIs
apis = ["drive", "fusiontables"]
enable_api(driver, new_project_id, apis)
return 0
def unload():
print "Done: unloading webdriver and virtualdisplay"
if not DEBUG:
driver.quit()
display.stop()
if __name__ == "__main__":
try:
result = run()
except:
result = 1
driver.get_screenshot_as_file("/tmp/webdriver_screenshot.png")
with open("/tmp/webdriver_source.html", "wb") as f:
f.write(driver.page_source.encode('utf-8'))
raise
finally:
unload()
sys.exit(result)
|
laborautonomo/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/bliptv.py
|
7
|
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
compat_str,
compat_urllib_request,
unescapeHTML,
)
class BlipTVIE(SubtitlesInfoExtractor):
"""Information extractor for blip.tv"""
_VALID_URL = r'https?://(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(?P<presumptive_id>.+)$'
_TESTS = [{
'url': 'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
'md5': 'c6934ad0b6acf2bd920720ec888eb812',
'info_dict': {
'id': '5779306',
'ext': 'mov',
'upload_date': '20111205',
'description': 'md5:9bc31f227219cde65e47eeec8d2dc596',
'uploader': 'Comic Book Resources - CBR TV',
'title': 'CBR EXCLUSIVE: "Gotham City Imposters" Bats VS Jokerz Short 3',
}
}, {
# https://github.com/rg3/youtube-dl/pull/2274
'note': 'Video with subtitles',
'url': 'http://blip.tv/play/h6Uag5OEVgI.html',
'md5': '309f9d25b820b086ca163ffac8031806',
'info_dict': {
'id': '6586561',
'ext': 'mp4',
'uploader': 'Red vs. Blue',
'description': 'One-Zero-One',
'upload_date': '20130614',
'title': 'Red vs. Blue Season 11 Episode 1',
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
presumptive_id = mobj.group('presumptive_id')
# See https://github.com/rg3/youtube-dl/issues/857
embed_mobj = re.match(r'https?://(?:\w+\.)?blip\.tv/(?:play/|api\.swf#)([a-zA-Z0-9]+)', url)
if embed_mobj:
info_url = 'http://blip.tv/play/%s.x?p=1' % embed_mobj.group(1)
info_page = self._download_webpage(info_url, embed_mobj.group(1))
video_id = self._search_regex(
r'data-episode-id="([0-9]+)', info_page, 'video_id')
return self.url_result('http://blip.tv/a/a-' + video_id, 'BlipTV')
cchar = '&' if '?' in url else '?'
json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
request = compat_urllib_request.Request(json_url)
request.add_header('User-Agent', 'iTunes/10.6.1')
json_data = self._download_json(request, video_id=presumptive_id)
if 'Post' in json_data:
data = json_data['Post']
else:
data = json_data
video_id = compat_str(data['item_id'])
upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
subtitles = {}
formats = []
if 'additionalMedia' in data:
for f in data['additionalMedia']:
if f.get('file_type_srt') == 1:
LANGS = {
'english': 'en',
}
lang = f['role'].rpartition('-')[-1].strip().lower()
langcode = LANGS.get(lang, lang)
subtitles[langcode] = f['url']
continue
if not int(f['media_width']): # filter m3u8
continue
formats.append({
'url': f['url'],
'format_id': f['role'],
'width': int(f['media_width']),
'height': int(f['media_height']),
})
else:
formats.append({
'url': data['media']['url'],
'width': int(data['media']['width']),
'height': int(data['media']['height']),
})
self._sort_formats(formats)
# subtitles
video_subtitles = self.extract_subtitles(video_id, subtitles)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
return {
'id': video_id,
'uploader': data['display_name'],
'upload_date': upload_date,
'title': data['title'],
'thumbnail': data['thumbnailUrl'],
'description': data['description'],
'user_agent': 'iTunes/10.6.1',
'formats': formats,
'subtitles': video_subtitles,
}
def _download_subtitle_url(self, sub_lang, url):
# For some weird reason, blip.tv serves a video instead of subtitles
# when we request with a common UA
req = compat_urllib_request.Request(url)
req.add_header('Youtubedl-user-agent', 'youtube-dl')
return self._download_webpage(req, None, note=False)
class BlipTVUserIE(InfoExtractor):
_VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
_PAGE_SIZE = 12
IE_NAME = 'blip.tv:user'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
username = mobj.group(1)
page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'
page = self._download_webpage(url, username, 'Downloading user page')
mobj = re.search(r'data-users-id="([^"]+)"', page)
page_base = page_base % mobj.group(1)
# Download video ids using BlipTV Ajax calls. Result size per
# query is limited (currently to 12 videos) so we need to query
# page by page until there are no video ids - it means we got
# all of them.
video_ids = []
pagenum = 1
while True:
url = page_base + "&page=" + str(pagenum)
page = self._download_webpage(
url, username, 'Downloading video ids from page %d' % pagenum)
# Extract video identifiers
ids_in_page = []
for mobj in re.finditer(r'href="/([^"]+)"', page):
if mobj.group(1) not in ids_in_page:
ids_in_page.append(unescapeHTML(mobj.group(1)))
video_ids.extend(ids_in_page)
# A little optimization - if current page is not
# "full", ie. does not contain PAGE_SIZE video ids then
# we can assume that this page is the last one - there
# are no more ids on further pages - no need to query
# again.
if len(ids_in_page) < self._PAGE_SIZE:
break
pagenum += 1
urls = ['http://blip.tv/%s' % video_id for video_id in video_ids]
url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
return [self.playlist_result(url_entries, playlist_title=username)]
|
toontownfunserver/Panda3D-1.9.0
|
refs/heads/master
|
python/Lib/ntpath.py
|
127
|
# Module 'ntpath' -- common operations on WinNT/Win95 pathnames
"""Common pathname manipulations, WindowsNT/95 version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
import os
import sys
import stat
import genericpath
import warnings
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"ismount","walk","expanduser","expandvars","normpath","abspath",
"splitunc","curdir","pardir","sep","pathsep","defpath","altsep",
"extsep","devnull","realpath","supports_unicode_filenames","relpath"]
# strings representing various path-related bits and pieces
curdir = '.'
pardir = '..'
extsep = '.'
sep = '\\'
pathsep = ';'
altsep = '/'
defpath = '.;C:\\bin'
if 'ce' in sys.builtin_module_names:
defpath = '\\Windows'
elif 'os2' in sys.builtin_module_names:
# OS/2 w/ VACPP
altsep = '/'
devnull = 'nul'
# Normalize the case of a pathname and map slashes to backslashes.
# Other normalizations (such as optimizing '../' away) are not done
# (this is done by normpath).
def normcase(s):
"""Normalize case of pathname.
Makes all characters lowercase and all slashes into backslashes."""
return s.replace("/", "\\").lower()
# Return whether a path is absolute.
# Trivial in Posix, harder on the Mac or MS-DOS.
# For DOS it is absolute if it starts with a slash or backslash (current
# volume), or if a pathname after the volume letter and colon / UNC resource
# starts with a slash or backslash.
def isabs(s):
"""Test whether a path is absolute"""
s = splitdrive(s)[1]
return s != '' and s[:1] in '/\\'
# Join two (or more) paths.
def join(a, *p):
"""Join two or more pathname components, inserting "\\" as needed.
If any component is an absolute path, all previous path components
will be discarded."""
path = a
for b in p:
b_wins = 0 # set to 1 iff b makes path irrelevant
if path == "":
b_wins = 1
elif isabs(b):
# This probably wipes out path so far. However, it's more
# complicated if path begins with a drive letter:
# 1. join('c:', '/a') == 'c:/a'
# 2. join('c:/', '/a') == 'c:/a'
# But
# 3. join('c:/a', '/b') == '/b'
# 4. join('c:', 'd:/') = 'd:/'
# 5. join('c:/', 'd:/') = 'd:/'
if path[1:2] != ":" or b[1:2] == ":":
# Path doesn't start with a drive letter, or cases 4 and 5.
b_wins = 1
# Else path has a drive letter, and b doesn't but is absolute.
elif len(path) > 3 or (len(path) == 3 and
path[-1] not in "/\\"):
# case 3
b_wins = 1
if b_wins:
path = b
else:
# Join, and ensure there's a separator.
assert len(path) > 0
if path[-1] in "/\\":
if b and b[0] in "/\\":
path += b[1:]
else:
path += b
elif path[-1] == ":":
path += b
elif b:
if b[0] in "/\\":
path += b
else:
path += "\\" + b
else:
# path is not empty and does not end with a backslash,
# but b is empty; since, e.g., split('a/') produces
# ('a', ''), it's best if join() adds a backslash in
# this case.
path += '\\'
return path
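# Illustrative examples of the drive-letter cases documented above (a sketch
# added for clarity, not part of the original module; each result follows from
# the join() code above):
#   join('c:', '/a')    -> 'c:/a'      # case 1: keep the drive letter
#   join('c:/', '/a')   -> 'c:/a'      # case 2
#   join('c:/a', '/b')  -> '/b'        # case 3: absolute b wins
#   join('c:', 'd:/')   -> 'd:/'       # case 4: a different drive wins
#   join('a', 'b', 'c') -> 'a\\b\\c'   # backslash separators inserted as needed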
# Split a path in a drive specification (a drive letter followed by a
# colon) and the path specification.
# It is always true that drivespec + pathspec == p
def splitdrive(p):
"""Split a pathname into drive and path specifiers. Returns a 2-tuple
"(drive,path)"; either part may be empty"""
if p[1:2] == ':':
return p[0:2], p[2:]
return '', p
# Parse UNC paths
def splitunc(p):
"""Split a pathname into UNC mount point and relative path specifiers.
Return a 2-tuple (unc, rest); either part may be empty.
If unc is not empty, it has the form '//host/mount' (or similar
using backslashes). unc+rest is always the input path.
Paths containing drive letters never have an UNC part.
"""
if p[1:2] == ':':
return '', p # Drive letter present
firstTwo = p[0:2]
if firstTwo == '//' or firstTwo == '\\\\':
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv equivalent to drive letter
# \\machine\mountpoint\directories...
# directory ^^^^^^^^^^^^^^^
normp = normcase(p)
index = normp.find('\\', 2)
if index == -1:
##raise RuntimeError, 'illegal UNC path: "' + p + '"'
return ("", p)
index = normp.find('\\', index + 1)
if index == -1:
index = len(p)
return p[:index], p[index:]
return '', p
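# Illustrative sanity checks for splitunc() (a sketch, not part of the original
# module; results follow from the code above):
#   splitunc('//machine/mountpoint/dir/file') -> ('//machine/mountpoint', '/dir/file')
#   splitunc('c:/dir')                        -> ('', 'c:/dir')    # drive letters never have a UNC part
#   splitunc('//machine')                     -> ('', '//machine') # no mount point, so no UNC part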
# Split a path in head (everything up to the last '/') and tail (the
# rest). After the trailing '/' is stripped, the invariant
# join(head, tail) == p holds.
# The resulting head won't end in '/' unless it is the root.
def split(p):
"""Split a pathname.
Return tuple (head, tail) where tail is everything after the final slash.
Either part may be empty."""
d, p = splitdrive(p)
# set i to index beyond p's last slash
i = len(p)
while i and p[i-1] not in '/\\':
i = i - 1
head, tail = p[:i], p[i:] # now tail has no slashes
# remove trailing slashes from head, unless it's all slashes
head2 = head
while head2 and head2[-1] in '/\\':
head2 = head2[:-1]
head = head2 or head
return d + head, tail
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
# Return the tail (basename) part of a path.
def basename(p):
"""Returns the final component of a pathname"""
return split(p)[1]
# Return the head (dirname) part of a path.
def dirname(p):
"""Returns the directory component of a pathname"""
return split(p)[0]
# Is a path a symbolic link?
# This will always return false on systems where posix.lstat doesn't exist.
def islink(path):
"""Test for symbolic link.
On WindowsNT/95 and OS/2 always returns false
"""
return False
# alias exists to lexists
lexists = exists
# Is a path a mount point? Either a root (with or without drive letter)
# or an UNC path with at most a / or \ after the mount point.
def ismount(path):
"""Test whether a path is a mount point (defined as root of drive)"""
unc, rest = splitunc(path)
if unc:
return rest in ("", "/", "\\")
p = splitdrive(path)[1]
return len(p) == 1 and p[0] in '/\\'
# Directory tree walk.
# For each directory under top (including top itself, but excluding
# '.' and '..'), func(arg, dirname, filenames) is called, where
# dirname is the name of the directory and filenames is the list
# of files (and subdirectories etc.) in the directory.
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
# Expand paths beginning with '~' or '~user'.
# '~' means $HOME; '~user' means that user's home directory.
# If the path doesn't begin with '~', or if the user or $HOME is unknown,
# the path is returned unchanged (leaving error reporting to whatever
# function is called with the expanded path as argument).
# See also module 'glob' for expansion of *, ? and [...] in pathnames.
# (A function should also be defined to do full *sh-style environment
# variable expansion.)
def expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = os.environ['HOME']
elif 'USERPROFILE' in os.environ:
userhome = os.environ['USERPROFILE']
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = os.environ['HOMEDRIVE']
except KeyError:
drive = ''
userhome = join(drive, os.environ['HOMEPATH'])
if i != 1: #~user
userhome = join(dirname(userhome), path[1:i])
return userhome + path[i:]
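# Illustrative behaviour of expanduser() (a sketch, not part of the original
# module; the results depend on which of HOME, USERPROFILE or
# HOMEDRIVE+HOMEPATH is set in the environment):
#   with HOME=C:\Users\guido : expanduser('~/src')      -> 'C:\Users\guido/src'
#   with HOME=C:\Users\guido : expanduser('~other/src') -> 'C:\Users\other/src'
#   expanduser('no-tilde-here') is always returned unchanged.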
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - '$$' is translated into '$'
# - '%%' is translated into '%' if '%%' are not seen in %var1%%var2%
# - ${varname} is accepted.
# - $varname is accepted.
# - %varname% is accepted.
# - varnames can be made out of letters, digits and the characters '_-'
# (though this is not verified in the ${varname} and %varname% cases)
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of the forms $var, ${var} and %var%.
Unknown variables are left unchanged."""
if '$' not in path and '%' not in path:
return path
import string
varchars = string.ascii_letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '%': # variable or '%'
if path[index + 1:index + 2] == '%':
res = res + c
index = index + 1
else:
path = path[index+1:]
pathlen = len(path)
try:
index = path.index('%')
except ValueError:
res = res + '%' + path
index = pathlen - 1
else:
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '%' + var + '%'
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '${' + var + '}'
except ValueError:
res = res + '${' + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
if var in os.environ:
res = res + os.environ[var]
else:
res = res + '$' + var
if c != '':
index = index - 1
else:
res = res + c
index = index + 1
return res
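# Illustrative behaviour of expandvars() (a sketch, not part of the original
# module; the %VAR%/${VAR}/$VAR lines assume the named variable exists in
# os.environ, and <value> stands in for whatever it holds):
#   expandvars('$$ and %%')      -> '$ and %'         # escapes collapsed
#   expandvars('%TEMP%/work')    -> '<value>/work'
#   expandvars('${TEMP}/work')   -> '<value>/work'
#   expandvars("'$TEMP'/work")   -> "'$TEMP'/work"    # no expansion inside single quotes
#   expandvars('%UNDEFINED%')    -> '%UNDEFINED%'     # unknown variables left unchanged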
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A\B.
# Previously, this function also truncated pathnames to 8+3 format,
# but as this module is called "ntpath", that's obviously wrong!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
backslash, dot = (u'\\', u'.') if isinstance(path, unicode) else ('\\', '.')
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = splitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path[:1] == "\\":
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
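# Illustrative behaviour of normpath() (a sketch, not part of the original
# module; results follow from the code above):
#   normpath('A//B')           -> 'A\\B'
#   normpath('A/./B')          -> 'A\\B'
#   normpath('A/foo/../B')     -> 'A\\B'
#   normpath('C:/foo/../')     -> 'C:\\'
#   normpath('\\\\?\\C:/foo')  -> '\\\\?\\C:/foo'   # literal-path prefix is returned unchanged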
# Return an absolute path.
try:
from nt import _getfullpathname
except ImportError: # not running on Windows - mock up something sensible
def abspath(path):
"""Return the absolute version of a path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
else: # use native Windows method on Windows
def abspath(path):
"""Return the absolute version of a path."""
if path: # Empty path must return current working directory.
try:
path = _getfullpathname(path)
except WindowsError:
pass # Bad path - return unchanged.
elif isinstance(path, unicode):
path = os.getcwdu()
else:
path = os.getcwd()
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
# Win9x family and earlier have no Unicode filename support.
supports_unicode_filenames = (hasattr(sys, "getwindowsversion") and
sys.getwindowsversion()[3] >= 2)
def _abspath_split(path):
abs = abspath(normpath(path))
prefix, rest = splitunc(abs)
is_unc = bool(prefix)
if not is_unc:
prefix, rest = splitdrive(abs)
return is_unc, prefix, [x for x in rest.split(sep) if x]
def relpath(path, start=curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_is_unc, start_prefix, start_list = _abspath_split(start)
path_is_unc, path_prefix, path_list = _abspath_split(path)
if path_is_unc ^ start_is_unc:
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
% (path, start))
if path_prefix.lower() != start_prefix.lower():
if path_is_unc:
raise ValueError("path is on UNC root %s, start on UNC root %s"
% (path_prefix, start_prefix))
else:
raise ValueError("path is on drive %s, start on drive %s"
% (path_prefix, start_prefix))
# Work out how much of the filepath is shared by start and path.
i = 0
for e1, e2 in zip(start_list, path_list):
if e1.lower() != e2.lower():
break
i += 1
rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return curdir
return join(*rel_list)
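# Illustrative behaviour of relpath() (a sketch, not part of the original
# module; both arguments below are already absolute, so the current working
# directory does not affect the result):
#   relpath('C:\\a\\b\\c', 'C:\\a')  -> 'b\\c'
#   relpath('C:\\a', 'C:\\a\\b\\c')  -> '..\\..'
#   relpath('C:\\a\\x', 'C:\\a\\y')  -> '..\\x'
#   relpath('C:\\a', 'D:\\a')        raises ValueError (paths on different drives)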
try:
# The genericpath.isdir implementation uses os.stat and checks the mode
# attribute to tell whether or not the path is a directory.
# This is overkill on Windows - just pass the path to GetFileAttributes
# and check the attribute from there.
from nt import _isdir as isdir
except ImportError:
# Use genericpath.isdir as imported above.
pass
|
AlexAsh/pycembed15
|
refs/heads/master
|
testftw.py
|
1
|
from pyftw import py_ftw, py_nftw, FTW_PHYS
def walk_ftw(fpath, sb, typeflag):
print fpath
return 0
def walk_nftw(fpath, sb, typeflag, ftwbuf):
print fpath
return 0
print "------------------------------"
py_ftw('.', walk_ftw, 1)
print "------------------------------"
py_nftw('.', walk_nftw, 1, FTW_PHYS)
print "------------------------------"
|
yuanagain/seniorthesis
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
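# Illustrative behaviour of wrap_ord() (a sketch, not part of the original
# module): on Python 2 indexing a byte string yields a 1-character str, so
# ord() must be applied; on Python 3 indexing bytes already yields an int,
# which is passed through unchanged.
#   wrap_ord('a')  -> 97   # Python 2 path: ord() applied
#   wrap_ord(97)   -> 97   # already an integer: returned as-is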
|
redbo/swift
|
refs/heads/master
|
test/unit/container/test_reconciler.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numbers
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
from collections import defaultdict
from datetime import datetime
from six.moves import urllib
from swift.container import reconciler
from swift.container.server import gen_resp_headers
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path, Timestamp, encode_timestamps
from test.unit import debug_logger, FakeRing, fake_http_connect
from test.unit.common.middleware.helpers import FakeSwift
def timestamp_to_last_modified(timestamp):
return datetime.utcfromtimestamp(
float(Timestamp(timestamp))).strftime('%Y-%m-%dT%H:%M:%S.%f')
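# Illustrative example (a sketch, not part of the original tests): the helper
# renders a Swift timestamp in the ISO-like form used by container listings.
#   timestamp_to_last_modified(1.5) -> '1970-01-01T00:00:01.500000'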
def container_resp_headers(**kwargs):
return HeaderKeyDict(gen_resp_headers(kwargs))
class FakeStoragePolicySwift(object):
def __init__(self):
self.storage_policy = defaultdict(FakeSwift)
self._mock_oldest_spi_map = {}
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.storage_policy[None], name)
def __call__(self, env, start_response):
method = env['REQUEST_METHOD']
path = env['PATH_INFO']
_, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
rest_with_last=True)
if not obj:
policy_index = None
else:
policy_index = self._mock_oldest_spi_map.get(cont, 0)
# allow backend policy override
if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
try:
return self.storage_policy[policy_index].__call__(
env, start_response)
except KeyError:
pass
if method == 'PUT':
resp_class = swob.HTTPCreated
else:
resp_class = swob.HTTPNotFound
self.storage_policy[policy_index].register(
method, path, resp_class, {}, '')
return self.storage_policy[policy_index].__call__(
env, start_response)
class FakeInternalClient(reconciler.InternalClient):
def __init__(self, listings):
self.app = FakeStoragePolicySwift()
self.user_agent = 'fake-internal-client'
self.request_tries = 1
self.parse(listings)
def parse(self, listings):
self.accounts = defaultdict(lambda: defaultdict(list))
for item, timestamp in listings.items():
# XXX this interface is stupid
if isinstance(timestamp, tuple):
timestamp, content_type = timestamp
else:
timestamp, content_type = timestamp, 'application/x-put'
storage_policy_index, path = item
account, container_name, obj_name = split_path(
path.encode('utf-8'), 0, 3, rest_with_last=True)
self.accounts[account][container_name].append(
(obj_name, storage_policy_index, timestamp, content_type))
for account_name, containers in self.accounts.items():
for con in containers:
self.accounts[account_name][con].sort(key=lambda t: t[0])
for account, containers in self.accounts.items():
account_listing_data = []
account_path = '/v1/%s' % account
for container, objects in containers.items():
container_path = account_path + '/' + container
container_listing_data = []
for entry in objects:
(obj_name, storage_policy_index,
timestamp, content_type) = entry
if storage_policy_index is None and not obj_name:
# empty container
continue
obj_path = container_path + '/' + obj_name
ts = Timestamp(timestamp)
headers = {'X-Timestamp': ts.normal,
'X-Backend-Timestamp': ts.internal}
# register object response
self.app.storage_policy[storage_policy_index].register(
'GET', obj_path, swob.HTTPOk, headers)
self.app.storage_policy[storage_policy_index].register(
'DELETE', obj_path, swob.HTTPNoContent, {})
# container listing entry
last_modified = timestamp_to_last_modified(timestamp)
# some tests setup mock listings using floats, some use
# strings, so normalize here
if isinstance(timestamp, numbers.Number):
timestamp = '%f' % timestamp
obj_data = {
'bytes': 0,
# listing data is unicode
'name': obj_name.decode('utf-8'),
'last_modified': last_modified,
'hash': timestamp.decode('utf-8'),
'content_type': content_type,
}
container_listing_data.append(obj_data)
container_listing_data.sort(key=operator.itemgetter('name'))
# register container listing response
container_headers = {}
container_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', container_path + container_qry_string,
swob.HTTPOk, container_headers,
json.dumps(container_listing_data))
if container_listing_data:
obj_name = container_listing_data[-1]['name']
# client should quote and encode marker
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(obj_name.encode('utf-8')))
self.app.register('GET', container_path + end_qry_string,
swob.HTTPOk, container_headers,
json.dumps([]))
self.app.register('DELETE', container_path,
swob.HTTPConflict, {}, '')
# simple account listing entry
container_data = {'name': container}
account_listing_data.append(container_data)
# register account response
account_listing_data.sort(key=operator.itemgetter('name'))
account_headers = {}
account_qry_string = '?format=json&marker=&end_marker='
self.app.register('GET', account_path + account_qry_string,
swob.HTTPOk, account_headers,
json.dumps(account_listing_data))
end_qry_string = '?format=json&marker=%s&end_marker=' % (
urllib.parse.quote(account_listing_data[-1]['name']))
self.app.register('GET', account_path + end_qry_string,
swob.HTTPOk, account_headers,
json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
def setUp(self):
self.fake_ring = FakeRing()
reconciler.direct_get_container_policy_index.reset()
def test_parse_raw_obj(self):
got = reconciler.parse_raw_obj({
'name': "2:/AUTH_bob/con/obj",
'hash': Timestamp(2017551.49350).internal,
'last_modified': timestamp_to_last_modified(2017551.49352),
'content_type': 'application/x-delete',
})
self.assertEqual(got['q_policy_index'], 2)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 2017551.49350)
self.assertEqual(got['q_record'], 2017551.49352)
self.assertEqual(got['q_op'], 'DELETE')
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# the 'hash' field in object listing has the raw 'created_at' value
# which could be a composite of timestamps
timestamp_str = encode_timestamps(Timestamp(1234.20190),
Timestamp(1245.20190),
Timestamp(1256.20190),
explicit=True)
got = reconciler.parse_raw_obj({
'name': "1:/AUTH_bob/con/obj",
'hash': timestamp_str,
'last_modified': timestamp_to_last_modified(1234.20192),
'content_type': 'application/x-put',
})
self.assertEqual(got['q_policy_index'], 1)
self.assertEqual(got['account'], 'AUTH_bob')
self.assertEqual(got['container'], 'con')
self.assertEqual(got['obj'], 'obj')
self.assertEqual(got['q_ts'], 1234.20190)
self.assertEqual(got['q_record'], 1234.20192)
self.assertEqual(got['q_op'], 'PUT')
# negative test
obj_info = {
'name': "1:/AUTH_bob/con/obj",
'hash': Timestamp(1234.20190).internal,
'last_modified': timestamp_to_last_modified(1234.20192),
}
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'foo'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
obj_info['content_type'] = 'appliation/x-post'
self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'bogus'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': '-1:/AUTH_test/container'})
self.assertRaises(ValueError, reconciler.parse_raw_obj,
{'name': 'asdf:/AUTH_test/c/obj'})
self.assertRaises(KeyError, reconciler.parse_raw_obj,
{'name': '0:/AUTH_test/c/obj',
'content_type': 'application/x-put'})
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_change_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertIsNone(oldest_spi)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp(time.time()).internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertIsNone(oldest_spi)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp(time.time())
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6201, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertIsNone(rv)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
return "?format=json&marker=%s&end_marker=" % \
urllib.parse.quote(marker.encode('utf-8'))
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
with mock.patch('swift.container.reconciler.InternalClient'):
self.reconciler = reconciler.ContainerReconciler(conf)
self.reconciler.logger = self.logger
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def _mock_listing(self, objects):
self.reconciler.swift = FakeInternalClient(objects)
self.fake_swift = self.reconciler.swift.app
def _mock_oldest_spi(self, container_oldest_spi_map):
self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
mock_time_iter = itertools.count(self.start_interval)
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', mock_time_iter.next):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions where we call them with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
obj_path = obj_name.encode('utf-8')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_with_slightly_newer_tombstone(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_src_object_unavailable_server_error(self):
# should be some sort of retry case
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
})
self._mock_oldest_spi({'c': 0})
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_fails_cleanup(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.123457, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# make the DELETE blow up
self.fake_swift.storage_policy[1].register(
'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.123457, offset=2))
# we try to cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.12346, offset=1))
# but cleanup fails!
self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
# so the queue is not popped
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
# and we'll have to retry
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_src_object_is_forever_gone(self):
# oh boy, hate to be here - this is an oldy
q_ts = self.start_interval - self.reconciler.reclaim_age - 1
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but it's gone :\
self.assertEqual(self.reconciler.stats['lost_source'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# gah, look, even if it was out there somewhere - we've been at this
# two weeks and haven't found it. We can't just keep looking forever,
# so... we're done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
# dunno if this is helpful, but FWIW we don't throw tombstones?
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
self.assertEqual(self.reconciler.stats['success'], 1) # lol
def test_object_move_dest_already_moved(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# so no attempt to read the source is made, but we do cleanup
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
# and wipe our hands of it
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_newer_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
(1, "/AUTH_bob/c/o1"): 3679.2019,
(0, "/AUTH_bob/c/o1"): 3679.2019 + 0.00001, # slightly newer
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we look for misplaced objects...
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but we found it already in the right place!
self.assertEqual(self.reconciler.stats['found_object'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
        # so no attempt to read the source is made, but we do cleanup
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[0]
# rather we just clean up the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3679.2019, offset=1))
        # and since we cleaned up the old object, this counts as done
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_dest_object_older_than_queue_entry(self):
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
(1, "/AUTH_bob/c/o1"): 36123.38393,
(0, "/AUTH_bob/c/o1"): 36123.38393 - 0.00001, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and since our version is *newer*, we overwrite
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# ... with a q_ts + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=2))
# then clean the dark matter
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with a q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(36123.38393, offset=1))
# and pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_put_fails(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest fail!
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and try to move it, but it fails
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it failed
self.assertEqual(self.reconciler.stats['copy_success'], 0)
self.assertEqual(self.reconciler.stats['copy_failed'], 1)
# ... so we don't clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_put_blows_up_crazy_town(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
(1, "/AUTH_bob/c/o1"): 36123.383925,
})
self._mock_oldest_spi({'c': 0})
# make the put to dest blow up crazy town
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[0].register(
'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
# turn the crank
deleted_container_entries = self._run_once()
# we find a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
('GET', '/v1/.misplaced_objects/36000' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and attempt to move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')]) # 2
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# ...with q_ts + offset 2 (20-microseconds)
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(36123.383925, offset=2))
# but it blows up hard
self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
# so we don't cleanup
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# and we don't pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move_no_such_object_no_tombstone_recent(self):
q_ts = float(Timestamp(time.time()))
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is recent enough that there could easily be
# tombstones on offline nodes or something, so we'll just leave it
# here and try again later
self.assertEqual(deleted_container_entries, [])
def test_object_move_no_such_object_no_tombstone_ancient(self):
queue_ts = float(Timestamp(time.time())) - \
self.reconciler.reclaim_age * 1.1
container = str(int(queue_ts // 3600 * 3600))
self._mock_listing({
(
None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
): queue_ts
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('1:/AUTH_jeb/c/o1'))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_jeb/c/o1')],
)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_jeb/c/o1')],
)
# the queue entry is old enough that the tombstones, if any, have
# probably been reaped, so we'll just give up
self.assertEqual(
deleted_container_entries,
[('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
def test_delete_old_empty_queue_containers(self):
ts = time.time() - self.reconciler.reclaim_age * 1.1
container = str(int(ts // 3600 * 3600))
older_ts = ts - 3600
older_container = str(int(older_ts // 3600 * 3600))
self._mock_listing({
(None, "/.misplaced_objects/%s/" % container): 0,
(None, "/.misplaced_objects/%s/something" % older_container): 0,
})
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container)),
('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
('DELETE', '/v1/.misplaced_objects/%s' % container),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('')),
('GET', '/v1/.misplaced_objects/%s' % older_container +
listing_qs('something'))])
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
def test_iter_over_old_containers_in_reverse(self):
step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
now = self.start_interval
containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
containers.append(container_name)
# add some old containers too
now -= self.reconciler.reclaim_age
old_containers = []
for i in range(10):
container_ts = int(now - step * i)
container_name = str(container_ts // 3600 * 3600)
old_containers.append(container_name)
containers.sort()
old_containers.sort()
all_containers = old_containers + containers
self._mock_listing(dict((
(None, "/.misplaced_objects/%s/" % container), 0
) for container in all_containers))
deleted_container_entries = self._run_once()
self.assertEqual(deleted_container_entries, [])
last_container = all_containers[-1]
account_listing_calls = [
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
]
new_container_calls = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(containers)
        ][1:]  # current_container gets skipped the second time around...
old_container_listings = [
('GET', '/v1/.misplaced_objects/%s' % container +
listing_qs('')) for container in reversed(old_containers)
]
old_container_deletes = [
('DELETE', '/v1/.misplaced_objects/%s' % container)
for container in reversed(old_containers)
]
old_container_calls = list(itertools.chain(*zip(
old_container_listings, old_container_deletes)))
self.assertEqual(self.fake_swift.calls,
[('GET', self.current_container_path)] +
account_listing_calls + new_container_calls +
old_container_calls)
def test_error_in_iter_containers(self):
self._mock_listing({})
# make the listing return an error
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
swob.HTTPServiceUnavailable, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors, [
'Error listing containers in account '
'.misplaced_objects (Unexpected response: '
'503 Service Unavailable)'])
def test_unhandled_exception_in_reconcile(self):
self._mock_listing({})
# make the listing blow up
def blow_up(*args, **kwargs):
raise Exception('kaboom!')
self.fake_swift.storage_policy[None].register(
'GET', '/v1/.misplaced_objects' + listing_qs(''),
blow_up, {})
self._run_once()
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs(''))])
self.assertEqual(self.reconciler.stats, {})
errors = self.reconciler.logger.get_lines_for_level('error')
self.assertEqual(errors,
['Unhandled Exception trying to reconcile: '])
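# Illustrative sketch (not part of the original test suite): the tests above
# repeatedly assert the reconciler's timestamp convention -- the PUT to the
# correct storage policy reuses the queue/source timestamp with offset 2 and
# the cleanup DELETE of the misplaced copy uses offset 1, so the cleanup can
# never supersede the re-placed object. A quick check of that ordering:
def _example_reconciler_timestamp_offsets():
    q_ts = 3600.123456
    put_ts = Timestamp(q_ts, offset=2)
    delete_ts = Timestamp(q_ts, offset=1)
    # the internal form encodes the offset, so the DELETE sorts below the PUT
    assert delete_ts.internal < put_ts.internal
    return put_ts.internal, delete_ts.internal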
if __name__ == '__main__':
unittest.main()
|
rootchina/python_koans
|
refs/heads/master
|
python2/koans/about_new_style_classes.py
|
81
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutNewStyleClasses(Koan):
class OldStyleClass:
"An old style class"
        # Old-style classes have been phased out in Python 3.
class NewStyleClass(object):
"A new style class"
# Introduced in Python 2.2
#
# Aside from this set of tests, Python Koans sticks exclusively to this
# kind of class
pass
def test_new_style_classes_inherit_from_object_base_class(self):
self.assertEqual(____, issubclass(self.NewStyleClass, object))
self.assertEqual(____, issubclass(self.OldStyleClass, object))
def test_new_style_classes_have_more_attributes(self):
self.assertEqual(__, len(dir(self.OldStyleClass)))
self.assertEqual(__, self.OldStyleClass.__doc__)
self.assertEqual(__, self.OldStyleClass.__module__)
self.assertEqual(__, len(dir(self.NewStyleClass)))
# To examine the available attributes, run
# 'dir(<Class name goes here>)'
# from a python console
# ------------------------------------------------------------------
def test_old_style_classes_have_type_but_no_class_attribute(self):
self.assertEqual(__, type(self.OldStyleClass).__name__)
try:
cls = self.OldStyleClass.__class__.__name__
except Exception as ex:
pass
# What was that error message from the exception?
self.assertMatch(__, ex[0])
def test_new_style_classes_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(__, self.NewStyleClass.__class__)
self.assertEqual(
__,
type(self.NewStyleClass) == self.NewStyleClass.__class__)
# ------------------------------------------------------------------
def test_in_old_style_instances_class_is_different_to_type(self):
old_style = self.OldStyleClass()
self.assertEqual(__, old_style.__class__.__name__)
self.assertEqual(__, type(old_style).__name__)
def test_new_style_instances_have_same_class_as_type(self):
new_style = self.NewStyleClass()
self.assertEqual(__, new_style.__class__.__name__)
self.assertEqual(__, type(new_style) == new_style.__class__)
|
TileHalo/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/example/echo_wsh.py
|
494
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
# This example handler accepts any request. See origin_check_wsh.py for how
# to reject access from untrusted scripts based on origin value.
pass # Always accept.
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
if isinstance(line, unicode):
request.ws_stream.send_message(line, binary=False)
if line == _GOODBYE_MESSAGE:
return
else:
request.ws_stream.send_message(line, binary=True)
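# Illustrative sketch (not the real origin_check_wsh.py mentioned above): one
# way a handler could reject untrusted pages during the opening handshake.
# The allowed origin value is a made-up example, and using a plain exception
# to abort the handshake is an assumption rather than the library's
# documented API.
def _origin_check_handshake_sketch(request, allowed_origin='http://example.com'):
    if request.ws_origin != allowed_origin:
        raise Exception('Unacceptable origin: %r' % request.ws_origin)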
# vi:sts=4 sw=4 et
|
agilman/flask-template
|
refs/heads/master
|
app/__init__.py
|
1
|
from flask import Flask
from app import config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.sqlalchemy_database_uri
app.secret_key = config.app_secret_key
from app import models
from app import api
from app import views
from app import auth
|
heikoheiko/pydevp2p
|
refs/heads/master
|
devp2p/tests/test_crypto.py
|
2
|
# -*- coding: utf-8 -*-
from devp2p import crypto
import random
def get_ecc(secret=''):
return crypto.ECCx(raw_privkey=crypto.mk_privkey(secret))
def test_valid_ecc():
for i in range(100):
e = get_ecc()
assert len(e.raw_pubkey) == 64
assert e.is_valid_key(e.raw_pubkey)
assert e.is_valid_key(e.raw_pubkey, e.raw_privkey)
pubkey = '\x00' * 64
assert not e.is_valid_key(pubkey)
def test_asymetric():
bob = get_ecc('secret2')
# enc / dec
plaintext = "Hello Bob"
ciphertext = crypto.encrypt(plaintext, bob.raw_pubkey)
assert bob.decrypt(ciphertext) == plaintext
def test_signature():
bob = get_ecc('secret2')
# sign
message = "Hello Alice"
signature = bob.sign(message)
# verify signature
assert crypto.verify(bob.raw_pubkey, signature, message) is True
assert crypto.ECCx(raw_pubkey=bob.raw_pubkey).verify(signature, message) is True
# wrong signature
message = "Hello Alicf"
assert crypto.ECCx(raw_pubkey=bob.raw_pubkey).verify(signature, message) is False
assert crypto.verify(bob.raw_pubkey, signature, message) is False
def test_recover():
alice = get_ecc('secret1')
message = 'hello bob'
signature = alice.sign(message)
assert len(signature) == 65
assert crypto.verify(alice.raw_pubkey, signature, message) is True
recovered_pubkey = crypto.ecdsa_recover(message, signature)
assert len(recovered_pubkey) == 64
assert alice.raw_pubkey == recovered_pubkey
def test_get_ecdh_key():
privkey = "332143e9629eedff7d142d741f896258f5a1bfab54dab2121d3ec5000093d74b".decode('hex')
remote_pubkey = "f0d2b97981bd0d415a843b5dfe8ab77a30300daab3658c578f2340308a2da1a07f0821367332598b6aa4e180a41e92f4ebbae3518da847f0b1c0bbfe20bcf4e1".decode(
'hex')
agree_expected = "ee1418607c2fcfb57fda40380e885a707f49000a5dda056d828b7d9bd1f29a08".decode(
'hex')
e = crypto.ECCx(raw_privkey=privkey)
agree = e.get_ecdh_key(remote_pubkey)
assert agree == agree_expected
def test_en_decrypt():
alice = crypto.ECCx()
bob = crypto.ECCx()
msg = 'test'
ciphertext = alice.encrypt(msg, bob.raw_pubkey)
assert bob.decrypt(ciphertext) == msg
def test_privtopub():
priv = crypto.mk_privkey('test')
pub = crypto.privtopub(priv)
pub2 = crypto.ECCx(raw_privkey=priv).raw_pubkey
assert pub == pub2
def recover_1kb(times=1000):
alice = get_ecc('secret1')
message = ''.join(chr(random.randrange(0, 256)) for i in range(1024))
signature = alice.sign(message)
for i in range(times):
recovered_pubkey = crypto.ecdsa_recover(message, signature)
assert recovered_pubkey == alice.raw_pubkey
def test_recover2():
recover_1kb(times=1)
if __name__ == '__main__':
import time
st = time.time()
times = 100
recover_1kb(times=times)
print 'took %.5f per recovery' % ((time.time() - st) / times)
|
sunil07t/e-mission-server
|
refs/heads/master
|
emission/simulation/trip_gen.py
|
1
|
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
from past.utils import old_div
from builtins import object
import random
import math
import json
import datetime
import urllib.request, urllib.error, urllib.parse
import sys
# Our imports
from emission.net.ext_service.otp.otp import OTP, PathNotFoundException
from emission.core.wrapper.trip_old import Coordinate
import emission.simulation.markov_model_counter as esmmc
import emission.net.ext_service.geocoder.nominatim as enn
import emission.core.get_database as edb
import emission.core.wrapper.trip as ecwt
import emission.core.wrapper.section as ecws
class Address(object):
    ## This class exists only for caching purposes,
    ## so we don't have to hit the geocoder a million times
def __init__(self, address):
self.text = address
self.cord = None
def __str__(self):
return self.text
class Creator(object):
def __init__(self, new=False):
self.new = new
self.starting_points = [ ]
self.ending_points = [ ]
self.a_to_b = [ ]
self.num_trips = None
self.radius = None
self.amount_missed = 0
self.starting_counter = esmmc.Counter( )
self.ending_counter = esmmc.Counter( )
self.mode_counter = esmmc.Counter( )
self.prog_bar = ""
def set_up(self):
city_file = open("emission/simulation/input.json", "r") ## User (Naomi) specifies locations and radius they want
jsn = json.load(city_file)
self.num_trips = jsn["number of trips"]
self.radius = float(jsn["radius"])
for place, weight in jsn['starting centroids'].items():
self.starting_counter[Address(place)] = weight
for place, weight in jsn['ending centroids'].items():
self.ending_counter[Address(place)] = weight
for mode, weight in jsn['modes'].items():
self.mode_counter[mode] = weight
city_file.close()
def get_starting_ending_points(self):
for _ in range(self.num_trips):
start_addr = esmmc.sampleFromCounter(self.starting_counter)
end_addr = esmmc.sampleFromCounter(self.ending_counter)
self.starting_points.append(get_one_random_point_in_radius(start_addr, self.radius))
self.ending_points.append(get_one_random_point_in_radius(end_addr, self.radius))
def make_a_to_b(self):
for _ in range(self.num_trips): ## Based on very rough estimate of how many of these end up in the ocean
start_index = random.randint(0, len(self.starting_points) - 1)
end_index = random.randint(0, len(self.ending_points) - 1)
starting_point = self.starting_points[start_index]
ending_point = self.ending_points[end_index]
to_add = ( starting_point, ending_point )
self.a_to_b.append(to_add)
def get_trips_from_a_to_b(self, user_id):
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
for i in range(len(self.a_to_b)):
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
t = self.a_to_b[i]
mode = esmmc.sampleFromCounter(self.mode_counter) ## Unsophisticated mode choice, Alexi would throw up
try:
self.prog_bar += "."
print(self.prog_bar)
rand_trip_id = random.random()
rand_user_id = user_id if user_id else random.random()
otp_trip = OTP(t[0], t[1], mode, write_day(curr_month, curr_day, curr_year), write_time(curr_hour, curr_minute), True)
if self.new:
print("here")
otp_trip.turn_into_new_trip(user_id)
else:
alt_trip = otp_trip.turn_into_trip("%s%s" % (rand_user_id, rand_trip_id), rand_user_id, rand_trip_id, True) ## ids
save_trip_to_db(alt_trip)
except PathNotFoundException:
print("path not found")
self.amount_missed += 1
except urllib.error.HTTPError:
print("server error")
pass
def save_trip_to_db(trip):
print("saving trip to db")
print(trip.user_id)
db = edb.get_trip_db()
print("RHRHRH")
print("start loc = %s" % trip.trip_start_location.coordinate_list())
print("end loc = %s" % trip.trip_end_location.coordinate_list())
db.insert({"_id": trip._id, "user_id": trip.user_id, "trip_id": trip.trip_id, "type" : "move", "sections": list(range(len(trip.sections))), "trip_start_datetime": trip.start_time.datetime,
"trip_end_datetime": trip.end_time.datetime, "trip_start_location": trip.trip_start_location.coordinate_list(),
"trip_end_location": trip.trip_end_location.coordinate_list(), "mode_list": trip.mode_list})
print("len(trip.sections) in trip gen is %s" % len(trip.sections))
for section in trip.sections:
save_section_to_db(section)
def save_section_to_db(section):
print("saving section to db")
db = edb.get_section_db()
db.insert({"user_id" : section.user_id, "trip_id" : section.trip_id, "distance" : section.distance, "type" : section.section_type,
"section_start_datetime" : section.start_time.datetime, "section_end_datetime" : section.end_time.datetime,
"section_start_point" : {"coordinates" : section.section_start_location.coordinate_list()},
"section_end_point" : {"coordinates" : section.section_end_location.coordinate_list()}, "mode" : section.mode, "confirmed_mode" : section.confirmed_mode})
def geocode_address(address):
if address.cord is None:
business_geocoder = enn.Geocoder()
results = business_geocoder.geocode(address.text)
address.cord = results
else:
results = address.cord
return results
def generate_random_locations_in_radius(address, radius, num_points):
# Input the desired radius in kilometers
locations = [ ]
for _ in range(num_points):
loc = get_one_random_point_in_radius(address, radius)
locations.append(loc)
return locations
def get_one_random_point_in_radius(address, radius):
# From https://gis.stackexchange.com/questions/25877/how-to-generate-random-locations-nearby-my-location
crd = geocode_address(address)
radius_in_degrees = kilometers_to_degrees(radius)
x_0 = crd.get_lon()
y_0 = crd.get_lat()
u = random.random()
v = random.random()
w = radius_in_degrees * math.sqrt(u)
t = 2 * math.pi * v
x = w * math.cos(t)
y = w * math.sin(t)
    # math.cos expects radians; widen the longitude offset by 1/cos(latitude)
    x = old_div(float(x), float(math.cos(math.radians(y_0))))
to_return = Coordinate(y + y_0, x + x_0)
return to_return
def kilometers_to_degrees(km):
    ## From the Stack Exchange post mentioned above
return (old_div(float(km),float(40000))) * 360
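# Illustrative sketch (not part of the original simulator): a quick sanity
# check of the helper above. With the 40,000 km / 360 degree approximation,
# one kilometer is 0.009 degrees; the sqrt(u) draw in
# get_one_random_point_in_radius() is what keeps sampled points uniform over
# the disc's area rather than bunched near the centre.
def _example_kilometers_to_degrees():
    assert abs(kilometers_to_degrees(1) - 0.009) < 1e-12
    return kilometers_to_degrees(1)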
def write_day(month, day, year):
return "%s-%s-%s" % (month, day, year)
def write_time(hour, minute):
return "%s:%s" % (hour, minute)
def create_fake_trips(user_name=None, new=False):
### This is the main function, its the only thing you need to run
my_creator = Creator(new)
my_creator.set_up()
my_creator.get_starting_ending_points()
my_creator.make_a_to_b()
my_creator.get_trips_from_a_to_b(user_name)
return my_creator
if __name__ == "__main__":
user_id = sys.argv[1]
create_fake_trips(user_id, True)
|
ThinkOpen-Solutions/odoo
|
refs/heads/stable
|
addons/hr_timesheet_sheet/wizard/__init__.py
|
443
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_current
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
nesdis/djongo
|
refs/heads/master
|
tests/django_tests/tests/v22/tests/postgres_tests/array_default_migrations/0002_integerarraymodel_field_2.py
|
133
|
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('postgres_tests', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='integerarraydefaultmodel',
name='field_2',
field=django.contrib.postgres.fields.ArrayField(models.IntegerField(), default=[], size=None),
preserve_default=False,
),
]
|
gigq/flasktodo
|
refs/heads/master
|
werkzeug/local.py
|
25
|
# -*- coding: utf-8 -*-
"""
werkzeug.local
~~~~~~~~~~~~~~
This module implements context-local objects.
:copyright: (c) 2010 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from greenlet import getcurrent as get_current_greenlet
except ImportError: # pragma: no cover
try:
from py.magic import greenlet
get_current_greenlet = greenlet.getcurrent
del greenlet
except:
# catch all, py.* fails with so many different errors.
get_current_greenlet = int
try:
from thread import get_ident as get_current_thread, allocate_lock
except ImportError: # pragma: no cover
from dummy_thread import get_ident as get_current_thread, allocate_lock
from werkzeug.wsgi import ClosingIterator
from werkzeug._internal import _patch_wrapper
# get the best ident function. if greenlets are not installed we can
# safely just use the builtin thread function and save a python methodcall
# and the cost of calculating a hash.
if get_current_greenlet is int: # pragma: no cover
get_ident = get_current_thread
else:
get_ident = lambda: (get_current_thread(), get_current_greenlet())
def release_local(local):
"""Releases the contents of the local for the current context.
This makes it possible to use locals without a manager.
Example::
>>> loc = Local()
>>> loc.foo = 42
>>> release_local(loc)
>>> hasattr(loc, 'foo')
False
With this function one can release :class:`Local` objects as well
as :class:`StackLocal` objects. However it is not possible to
release data held by proxies that way, one always has to retain
a reference to the underlying local object in order to be able
to release it.
.. versionadded:: 0.6.1
"""
local.__release_local__()
class Local(object):
__slots__ = ('__storage__', '__lock__')
def __init__(self):
object.__setattr__(self, '__storage__', {})
object.__setattr__(self, '__lock__', allocate_lock())
def __iter__(self):
return self.__storage__.iteritems()
def __call__(self, proxy):
"""Create a proxy for a name."""
return LocalProxy(self, proxy)
def __release_local__(self):
self.__storage__.pop(get_ident(), None)
def __getattr__(self, name):
self.__lock__.acquire()
try:
try:
return self.__storage__[get_ident()][name]
except KeyError:
raise AttributeError(name)
finally:
self.__lock__.release()
def __setattr__(self, name, value):
self.__lock__.acquire()
try:
ident = get_ident()
storage = self.__storage__
if ident in storage:
storage[ident][name] = value
else:
storage[ident] = {name: value}
finally:
self.__lock__.release()
def __delattr__(self, name):
self.__lock__.acquire()
try:
try:
del self.__storage__[get_ident()][name]
except KeyError:
raise AttributeError(name)
finally:
self.__lock__.release()
class LocalStack(object):
"""This class works similar to a :class:`Local` but keeps a stack
of objects instead. This is best explained with an example::
>>> ls = LocalStack()
>>> ls.push(42)
>>> ls.top
42
>>> ls.push(23)
>>> ls.top
23
>>> ls.pop()
23
>>> ls.top
42
They can be force released by using a :class:`LocalManager` or with
the :func:`release_local` function but the correct way is to pop the
item from the stack after using. When the stack is empty it will
no longer be bound to the current context (and as such released).
By calling the stack without arguments it returns a proxy that resolves to
the topmost item on the stack.
.. versionadded:: 0.6.1
"""
def __init__(self):
self._local = Local()
self._lock = allocate_lock()
def __release_local__(self):
self._local.__release_local__()
def __call__(self):
def _lookup():
rv = self.top
if rv is None:
raise RuntimeError('object unbound')
return rv
return LocalProxy(_lookup)
def push(self, obj):
"""Pushes a new item to the stack"""
self._lock.acquire()
try:
rv = getattr(self._local, 'stack', None)
if rv is None:
self._local.stack = rv = []
rv.append(obj)
return rv
finally:
self._lock.release()
def pop(self):
"""Removes the topmost item from the stack, will return the
old value or `None` if the stack was already empty.
"""
self._lock.acquire()
try:
stack = getattr(self._local, 'stack', None)
if stack is None:
return None
elif len(stack) == 1:
release_local(self._local)
return stack[-1]
else:
return stack.pop()
finally:
self._lock.release()
@property
def top(self):
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
try:
return self._local.stack[-1]
except (AttributeError, IndexError):
return None
class LocalManager(object):
"""Local objects cannot manage themselves. For that you need a local
manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.
.. versionchanged:: 0.6.1
Instead of a manager the :func:`release_local` function can be used
as well.
"""
def __init__(self, locals=None):
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
self.locals = list(locals)
def get_ident(self):
"""Return the context identifier the local objects use internally for
this context. You cannot override this method to change the behavior
but use it to link other context local objects (such as SQLAlchemy's
scoped sessions) to the Werkzeug locals.
"""
return get_ident()
def cleanup(self):
"""Manually clean up the data in the locals for this context. Call
this at the end of the request or use `make_middleware()`.
"""
ident = self.get_ident()
for local in self.locals:
release_local(local)
def make_middleware(self, app):
"""Wrap a WSGI application so that cleaning up happens after
request end.
"""
def application(environ, start_response):
return ClosingIterator(app(environ, start_response), self.cleanup)
return application
def middleware(self, func):
"""Like `make_middleware` but for decorating functions.
Example usage::
@manager.middleware
def application(environ, start_response):
...
The difference to `make_middleware` is that the function passed
will have all the arguments copied from the inner application
(name, docstring, module).
"""
return _patch_wrapper(func, self.make_middleware(func))
def __repr__(self):
return '<%s storages: %d>' % (
self.__class__.__name__,
len(self.locals)
)
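# Illustrative usage sketch (not part of the original module): the pattern the
# LocalManager docstring describes -- per-request data stored on a Local gets
# released by the middleware after the response iterator is closed. The
# `current_user` attribute and the tiny WSGI app are made-up example names.
def _local_manager_usage_sketch():
    request_local = Local()
    manager = LocalManager([request_local])
    def application(environ, start_response):
        request_local.current_user = environ.get('REMOTE_USER')
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello %s' % request_local.current_user]
    # wrap the app so manager.cleanup() runs once the response is closed
    return manager.make_middleware(application)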
class LocalProxy(object):
"""Acts as a proxy for a werkzeug local. Forwards all operations to
a proxied object. The only operations not supported for forwarding
are right handed operands and any kind of assignment.
Example usage::
from werkzeug import Local
l = Local()
# these are proxies
request = l('request')
user = l('user')
from werkzeug import LocalStack
_response_local = LocalStack()
# this is a proxy
response = _response_local()
Whenever something is bound to l.user / l.request the proxy objects
will forward all operations. If no object is bound a :exc:`RuntimeError`
will be raised.
To create proxies to :class:`Local` or :class:`LocalStack` objects,
call the object as shown above. If you want to have a proxy to an
object looked up by a function, you can (as of Werkzeug 0.6.1) pass
a function to the :class:`LocalProxy` constructor::
session = LocalProxy(lambda: get_current_request().session)
.. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
"""
__slots__ = ('__local', '__dict__', '__name__')
def __init__(self, local, name=None):
object.__setattr__(self, '_LocalProxy__local', local)
object.__setattr__(self, '__name__', name)
def _get_current_object(self):
"""Return the current object. This is useful if you want the real
object behind the proxy at a time for performance reasons or because
you want to pass the object into a different context.
"""
if not hasattr(self.__local, '__release_local__'):
return self.__local()
try:
return getattr(self.__local, self.__name__)
except AttributeError:
raise RuntimeError('no object bound to %s' % self.__name__)
@property
def __dict__(self):
try:
return self._get_current_object().__dict__
except RuntimeError:
            raise AttributeError('__dict__')
def __repr__(self):
try:
obj = self._get_current_object()
except RuntimeError:
return '<%s unbound>' % self.__class__.__name__
return repr(obj)
def __nonzero__(self):
try:
return bool(self._get_current_object())
except RuntimeError:
return False
def __unicode__(self):
try:
return unicode(self._get_current_object())
except RuntimeError:
return repr(self)
def __dir__(self):
try:
return dir(self._get_current_object())
except RuntimeError:
return []
def __getattr__(self, name):
if name == '__members__':
return dir(self._get_current_object())
return getattr(self._get_current_object(), name)
def __setitem__(self, key, value):
self._get_current_object()[key] = value
def __delitem__(self, key):
del self._get_current_object()[key]
def __setslice__(self, i, j, seq):
self._get_current_object()[i:j] = seq
def __delslice__(self, i, j):
del self._get_current_object()[i:j]
__setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
__delattr__ = lambda x, n: delattr(x._get_current_object(), n)
__str__ = lambda x: str(x._get_current_object())
__lt__ = lambda x, o: x._get_current_object() < o
__le__ = lambda x, o: x._get_current_object() <= o
__eq__ = lambda x, o: x._get_current_object() == o
__ne__ = lambda x, o: x._get_current_object() != o
__gt__ = lambda x, o: x._get_current_object() > o
__ge__ = lambda x, o: x._get_current_object() >= o
__cmp__ = lambda x, o: cmp(x._get_current_object(), o)
__hash__ = lambda x: hash(x._get_current_object())
__call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
__len__ = lambda x: len(x._get_current_object())
__getitem__ = lambda x, i: x._get_current_object()[i]
__iter__ = lambda x: iter(x._get_current_object())
__contains__ = lambda x, i: i in x._get_current_object()
__getslice__ = lambda x, i, j: x._get_current_object()[i:j]
__add__ = lambda x, o: x._get_current_object() + o
__sub__ = lambda x, o: x._get_current_object() - o
__mul__ = lambda x, o: x._get_current_object() * o
__floordiv__ = lambda x, o: x._get_current_object() // o
__mod__ = lambda x, o: x._get_current_object() % o
__divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
__pow__ = lambda x, o: x._get_current_object() ** o
__lshift__ = lambda x, o: x._get_current_object() << o
__rshift__ = lambda x, o: x._get_current_object() >> o
__and__ = lambda x, o: x._get_current_object() & o
__xor__ = lambda x, o: x._get_current_object() ^ o
__or__ = lambda x, o: x._get_current_object() | o
__div__ = lambda x, o: x._get_current_object().__div__(o)
__truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
__neg__ = lambda x: -(x._get_current_object())
__pos__ = lambda x: +(x._get_current_object())
__abs__ = lambda x: abs(x._get_current_object())
__invert__ = lambda x: ~(x._get_current_object())
__complex__ = lambda x: complex(x._get_current_object())
__int__ = lambda x: int(x._get_current_object())
__long__ = lambda x: long(x._get_current_object())
__float__ = lambda x: float(x._get_current_object())
__oct__ = lambda x: oct(x._get_current_object())
__hex__ = lambda x: hex(x._get_current_object())
__index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
|
rbaumg/trac
|
refs/heads/trunk
|
trac/upgrades/db35.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.db import Table, Column, Index, DatabaseManager
def do_upgrade(env, version, cursor):
"""Add the notify_watch table."""
table = Table('notify_watch', key='id')[
Column('id', auto_increment=True),
Column('sid'),
Column('authenticated', type='int'),
Column('class'),
Column('realm'),
Column('target'),
Index(['sid', 'authenticated', 'class']),
Index(['class', 'realm', 'target'])]
DatabaseManager(env).create_tables([table])
|
jereze/scikit-learn
|
refs/heads/master
|
examples/linear_model/plot_sgd_comparison.py
|
77
|
"""
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import LogisticRegression
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
("SAG", LogisticRegression(solver='sag', tol=1e-1, C=1.e4 / X.shape[0]))
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
print("training %s" % name)
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
|
tpodowd/boto
|
refs/heads/master
|
boto/s3/key.py
|
72
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Nexenta Systems Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import email.utils
import errno
import hashlib
import mimetypes
import os
import re
import base64
import binascii
import math
from hashlib import md5
import boto.utils
from boto.compat import BytesIO, six, urllib, encodebytes
from boto.exception import BotoClientError
from boto.exception import StorageDataError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.s3.keyfile import KeyFile
from boto.s3.user import User
from boto import UserAgent
from boto.utils import compute_md5, compute_hash
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
class Key(object):
"""
Represents a key (object) in an S3 bucket.
:ivar bucket: The parent :class:`boto.s3.bucket.Bucket`.
:ivar name: The name of this Key object.
:ivar metadata: A dictionary containing user metadata that you
wish to store with the object or that has been retrieved from
an existing object.
:ivar cache_control: The value of the `Cache-Control` HTTP header.
:ivar content_type: The value of the `Content-Type` HTTP header.
:ivar content_encoding: The value of the `Content-Encoding` HTTP header.
:ivar content_disposition: The value of the `Content-Disposition` HTTP
header.
:ivar content_language: The value of the `Content-Language` HTTP header.
:ivar etag: The `etag` associated with this object.
:ivar last_modified: The string timestamp representing the last
time this object was modified in S3.
:ivar owner: The ID of the owner of this object.
:ivar storage_class: The storage class of the object. Currently, one of:
STANDARD | REDUCED_REDUNDANCY | GLACIER
:ivar md5: The MD5 hash of the contents of the object.
:ivar size: The size, in bytes, of the object.
:ivar version_id: The version ID of this object, if it is a versioned
object.
:ivar encrypted: Whether the object is encrypted while at rest on
the server.
"""
DefaultContentType = 'application/octet-stream'
RestoreBody = """<?xml version="1.0" encoding="UTF-8"?>
<RestoreRequest xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Days>%s</Days>
</RestoreRequest>"""
BufferSize = boto.config.getint('Boto', 'key_buffer_size', 8192)
# The object metadata fields a user can set, other than custom metadata
# fields (i.e., those beginning with a provider-specific prefix like
# x-amz-meta).
base_user_settable_fields = set(["cache-control", "content-disposition",
"content-encoding", "content-language",
"content-md5", "content-type",
"x-robots-tag", "expires"])
_underscore_base_user_settable_fields = set()
for f in base_user_settable_fields:
_underscore_base_user_settable_fields.add(f.replace('-', '_'))
# Metadata fields, whether user-settable or not, other than custom
# metadata fields (i.e., those beginning with a provider specific prefix
# like x-amz-meta).
base_fields = (base_user_settable_fields |
set(["last-modified", "content-length", "date", "etag"]))
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.metadata = {}
self.cache_control = None
self.content_type = self.DefaultContentType
self.content_encoding = None
self.content_disposition = None
self.content_language = None
self.filename = None
self.etag = None
self.is_latest = False
self.last_modified = None
self.owner = None
self._storage_class = None
self.path = None
self.resp = None
self.mode = None
self.size = None
self.version_id = None
self.source_version_id = None
self.delete_marker = False
self.encrypted = None
# If the object is being restored, this attribute will be set to True.
# If the object is restored, it will be set to False. Otherwise this
# value will be None. If the restore is completed (ongoing_restore =
# False), the expiry_date will be populated with the expiry date of the
# restored object.
self.ongoing_restore = None
self.expiry_date = None
self.local_hashes = {}
def __repr__(self):
if self.bucket:
name = u'<Key: %s,%s>' % (self.bucket.name, self.name)
else:
name = u'<Key: None,%s>' % self.name
# Encode to bytes for Python 2 to prevent display decoding issues
if not isinstance(name, str):
name = name.encode('utf-8')
return name
def __iter__(self):
return self
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def _get_key(self):
return self.name
def _set_key(self, value):
self.name = value
    key = property(_get_key, _set_key)
def _get_md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
return binascii.b2a_hex(self.local_hashes['md5'])
def _set_md5(self, value):
if value:
self.local_hashes['md5'] = binascii.a2b_hex(value)
elif 'md5' in self.local_hashes:
self.local_hashes.pop('md5', None)
    md5 = property(_get_md5, _set_md5)
def _get_base64md5(self):
if 'md5' in self.local_hashes and self.local_hashes['md5']:
md5 = self.local_hashes['md5']
if not isinstance(md5, bytes):
md5 = md5.encode('utf-8')
return binascii.b2a_base64(md5).decode('utf-8').rstrip('\n')
def _set_base64md5(self, value):
if value:
if not isinstance(value, six.string_types):
value = value.decode('utf-8')
self.local_hashes['md5'] = binascii.a2b_base64(value)
elif 'md5' in self.local_hashes:
del self.local_hashes['md5']
    base64md5 = property(_get_base64md5, _set_base64md5)
def _get_storage_class(self):
if self._storage_class is None and self.bucket:
# Attempt to fetch storage class
list_items = list(self.bucket.list(self.name.encode('utf-8')))
if len(list_items) and getattr(list_items[0], '_storage_class',
None):
self._storage_class = list_items[0]._storage_class
else:
# Key is not yet saved? Just use default...
self._storage_class = 'STANDARD'
return self._storage_class
def _set_storage_class(self, value):
self._storage_class = value
storage_class = property(_get_storage_class, _set_storage_class)
def get_md5_from_hexdigest(self, md5_hexdigest):
"""
A utility function to create the 2-tuple (md5hexdigest, base64md5)
from just having a precalculated md5_hexdigest.
"""
digest = binascii.unhexlify(md5_hexdigest)
base64md5 = encodebytes(digest)
if base64md5[-1] == '\n':
base64md5 = base64md5[0:-1]
return (md5_hexdigest, base64md5)
def handle_encryption_headers(self, resp):
provider = self.bucket.connection.provider
if provider.server_side_encryption_header:
self.encrypted = resp.getheader(
provider.server_side_encryption_header, None)
else:
self.encrypted = None
def handle_version_headers(self, resp, force=False):
provider = self.bucket.connection.provider
# If the Key object already has a version_id attribute value, it
# means that it represents an explicit version and the user is
# doing a get_contents_*(version_id=<foo>) to retrieve another
# version of the Key. In that case, we don't really want to
# overwrite the version_id in this Key object. Comprende?
if self.version_id is None or force:
self.version_id = resp.getheader(provider.version_id, None)
self.source_version_id = resp.getheader(provider.copy_source_version_id,
None)
if resp.getheader(provider.delete_marker, 'false') == 'true':
self.delete_marker = True
else:
self.delete_marker = False
def handle_restore_headers(self, response):
provider = self.bucket.connection.provider
header = response.getheader(provider.restore_header)
if header is None:
return
parts = header.split(',', 1)
for part in parts:
key, val = [i.strip() for i in part.split('=')]
val = val.replace('"', '')
if key == 'ongoing-request':
self.ongoing_restore = True if val.lower() == 'true' else False
elif key == 'expiry-date':
self.expiry_date = val
def handle_addl_headers(self, headers):
"""
Used by Key subclasses to do additional, provider-specific
processing of response headers. No-op for this base class.
"""
pass
def open_read(self, headers=None, query_args='',
override_num_retries=None, response_headers=None):
"""
Open this key for reading
:type headers: dict
:param headers: Headers to pass in the web request
:type query_args: string
:param query_args: Arguments to pass in the query string
(ie, 'torrent')
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
"""
if self.resp is None:
self.mode = 'r'
provider = self.bucket.connection.provider
self.resp = self.bucket.connection.make_request(
'GET', self.bucket.name, self.name, headers,
query_args=query_args,
override_num_retries=override_num_retries)
if self.resp.status < 199 or self.resp.status > 299:
body = self.resp.read()
raise provider.storage_response_error(self.resp.status,
self.resp.reason, body)
response_headers = self.resp.msg
self.metadata = boto.utils.get_aws_metadata(response_headers,
provider)
for name, value in response_headers.items():
# To get correct size for Range GETs, use Content-Range
# header if one was returned. If not, use Content-Length
# header.
if (name.lower() == 'content-length' and
'Content-Range' not in response_headers):
self.size = int(value)
elif name.lower() == 'content-range':
end_range = re.sub('.*/(.*)', '\\1', value)
self.size = int(end_range)
elif name.lower() in Key.base_fields:
self.__dict__[name.lower().replace('-', '_')] = value
self.handle_version_headers(self.resp)
self.handle_encryption_headers(self.resp)
self.handle_restore_headers(self.resp)
self.handle_addl_headers(self.resp.getheaders())
def open_write(self, headers=None, override_num_retries=None):
"""
Open this key for writing.
Not yet implemented
:type headers: dict
:param headers: Headers to pass in the write request
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying PUT.
"""
raise BotoClientError('Not Implemented')
def open(self, mode='r', headers=None, query_args=None,
override_num_retries=None):
if mode == 'r':
self.mode = 'r'
self.open_read(headers=headers, query_args=query_args,
override_num_retries=override_num_retries)
elif mode == 'w':
self.mode = 'w'
self.open_write(headers=headers,
override_num_retries=override_num_retries)
else:
raise BotoClientError('Invalid mode: %s' % mode)
closed = False
def close(self, fast=False):
"""
Close this key.
:type fast: bool
:param fast: True if you want the connection to be closed without first
reading the content. This should only be used in cases where subsequent
calls don't need to return the content from the open HTTP connection.
Note: As explained at
http://docs.python.org/2/library/httplib.html#httplib.HTTPConnection.getresponse,
callers must read the whole response before sending a new request to the
server. Calling Key.close(fast=True) and making a subsequent request to
the server will work because boto will get an httplib exception and
close/reopen the connection.
"""
if self.resp and not fast:
self.resp.read()
self.resp = None
self.mode = None
self.closed = True
def next(self):
"""
By providing a next method, the key object supports use as an iterator.
For example, you can now say:
for bytes in key:
write bytes to a file or whatever
All of the HTTP connection stuff is handled for you.
"""
self.open_read()
data = self.resp.read(self.BufferSize)
if not data:
self.close()
raise StopIteration
return data
# Python 3 iterator support
__next__ = next
def read(self, size=0):
self.open_read()
if size == 0:
data = self.resp.read()
else:
data = self.resp.read(size)
if not data:
self.close()
return data
def change_storage_class(self, new_storage_class, dst_bucket=None,
validate_dst_bucket=True):
"""
Change the storage class of an existing key.
Depending on whether a different destination bucket is supplied
or not, this will either move the item within the bucket, preserving
        all metadata and ACL info but changing the storage class, or it
will copy the item to the provided destination bucket, also
preserving metadata and ACL info.
:type new_storage_class: string
:param new_storage_class: The new storage class for the Key.
Possible values are:
* STANDARD
* REDUCED_REDUNDANCY
:type dst_bucket: string
:param dst_bucket: The name of a destination bucket. If not
provided the current bucket of the key will be used.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
"""
bucket_name = dst_bucket or self.bucket.name
if new_storage_class == 'STANDARD':
return self.copy(bucket_name, self.name,
reduced_redundancy=False, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
elif new_storage_class == 'REDUCED_REDUNDANCY':
return self.copy(bucket_name, self.name,
reduced_redundancy=True, preserve_acl=True,
validate_dst_bucket=validate_dst_bucket)
else:
raise BotoClientError('Invalid storage class: %s' %
new_storage_class)
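    # Illustrative sketch: switching an existing key to Reduced Redundancy
    # Storage in place. Assumes 'bucket' is an existing Bucket object; the
    # key name is a placeholder.
    #
    #     key = bucket.get_key('reports/2012.csv')
    #     key.change_storage_class('REDUCED_REDUNDANCY')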
def copy(self, dst_bucket, dst_key, metadata=None,
reduced_redundancy=False, preserve_acl=False,
encrypt_key=False, validate_dst_bucket=True):
"""
Copy this Key to another bucket.
:type dst_bucket: string
:param dst_bucket: The name of the destination bucket
:type dst_key: string
:param dst_key: The name of the destination key
:type metadata: dict
:param metadata: Metadata to be associated with new key. If
metadata is supplied, it will replace the metadata of the
source key being copied. If no metadata is supplied, the
source key's metadata will be copied to the new key.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will force the
storage class of the new Key to be REDUCED_REDUNDANCY
regardless of the storage class of the key being copied.
The Reduced Redundancy Storage (RRS) feature of S3,
provides lower redundancy at lower storage cost.
:type preserve_acl: bool
:param preserve_acl: If True, the ACL from the source key will
be copied to the destination key. If False, the
destination key will have the default ACL. Note that
preserving the ACL in the new key object will require two
additional API calls to S3, one to retrieve the current
ACL and one to set that ACL on the new object. If you
don't care about the ACL, a value of False will be
significantly more efficient.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type validate_dst_bucket: bool
:param validate_dst_bucket: If True, will validate the dst_bucket
by using an extra list request.
:rtype: :class:`boto.s3.key.Key` or subclass
:returns: An instance of the newly created key object
"""
dst_bucket = self.bucket.connection.lookup(dst_bucket,
validate_dst_bucket)
if reduced_redundancy:
storage_class = 'REDUCED_REDUNDANCY'
else:
storage_class = self.storage_class
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata,
storage_class=storage_class,
preserve_acl=preserve_acl,
encrypt_key=encrypt_key,
src_version_id=self.version_id)
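    # Illustrative sketch: copying a key into another bucket while keeping
    # its ACL. Bucket and key names are placeholders; assumes both buckets
    # exist and are reachable with the current credentials.
    #
    #     src = conn.get_bucket('source-bucket').get_key('data.bin')
    #     copied = src.copy('backup-bucket', 'data.bin', preserve_acl=True)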
def startElement(self, name, attrs, connection):
if name == 'Owner':
self.owner = User(self)
return self.owner
else:
return None
def endElement(self, name, value, connection):
if name == 'Key':
self.name = value
elif name == 'ETag':
self.etag = value
elif name == 'IsLatest':
if value == 'true':
self.is_latest = True
else:
self.is_latest = False
elif name == 'LastModified':
self.last_modified = value
elif name == 'Size':
self.size = int(value)
elif name == 'StorageClass':
self.storage_class = value
elif name == 'Owner':
pass
elif name == 'VersionId':
self.version_id = value
else:
setattr(self, name, value)
def exists(self, headers=None):
"""
Returns True if the key exists
:rtype: bool
:return: Whether the key exists on S3
"""
return bool(self.bucket.lookup(self.name, headers=headers))
def delete(self, headers=None):
"""
Delete this key from S3
"""
return self.bucket.delete_key(self.name, version_id=self.version_id,
headers=headers)
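    # Illustrative sketch: checking for a key and removing it. Assumes
    # 'bucket' is an existing Bucket object; the key name is a placeholder.
    #
    #     key = Key(bucket, 'tmp/scratch.dat')
    #     if key.exists():
    #         key.delete()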
def get_metadata(self, name):
return self.metadata.get(name)
def set_metadata(self, name, value):
# Ensure that metadata that is vital to signing is in the correct
# case. Applies to ``Content-Type`` & ``Content-MD5``.
if name.lower() == 'content-type':
self.metadata['Content-Type'] = value
elif name.lower() == 'content-md5':
self.metadata['Content-MD5'] = value
else:
self.metadata[name] = value
if name.lower() in Key.base_user_settable_fields:
self.__dict__[name.lower().replace('-', '_')] = value
def update_metadata(self, d):
self.metadata.update(d)
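    # Illustrative sketch: attaching custom metadata before an upload.
    # Custom names are sent with the provider's metadata prefix (e.g.
    # x-amz-meta-); the key and file names here are placeholders.
    #
    #     key = bucket.new_key('photos/cat.jpg')
    #     key.set_metadata('Content-Type', 'image/jpeg')
    #     key.set_metadata('owner', 'alice')
    #     key.set_contents_from_filename('cat.jpg')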
# convenience methods for setting/getting ACL
def set_acl(self, acl_str, headers=None):
if self.bucket is not None:
self.bucket.set_acl(acl_str, self.name, headers=headers)
def get_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_acl(self.name, headers=headers)
def get_xml_acl(self, headers=None):
if self.bucket is not None:
return self.bucket.get_xml_acl(self.name, headers=headers)
def set_xml_acl(self, acl_str, headers=None):
if self.bucket is not None:
return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)
def set_canned_acl(self, acl_str, headers=None):
return self.bucket.set_canned_acl(acl_str, self.name, headers)
def get_redirect(self):
"""Return the redirect location configured for this key.
If no redirect is configured (via set_redirect), then None
will be returned.
"""
response = self.bucket.connection.make_request(
'HEAD', self.bucket.name, self.name)
if response.status == 200:
return response.getheader('x-amz-website-redirect-location')
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def set_redirect(self, redirect_location, headers=None):
"""Configure this key to redirect to another location.
When the bucket associated with this key is accessed from the website
endpoint, a 301 redirect will be issued to the specified
`redirect_location`.
:type redirect_location: string
:param redirect_location: The location to redirect.
"""
if headers is None:
headers = {}
else:
headers = headers.copy()
headers['x-amz-website-redirect-location'] = redirect_location
response = self.bucket.connection.make_request('PUT', self.bucket.name,
self.name, headers)
if response.status == 200:
return True
else:
raise self.provider.storage_response_error(
response.status, response.reason, response.read())
def make_public(self, headers=None):
return self.bucket.set_canned_acl('public-read', self.name, headers)
def generate_url(self, expires_in, method='GET', headers=None,
query_auth=True, force_http=False, response_headers=None,
expires_in_absolute=False, version_id=None,
policy=None, reduced_redundancy=False, encrypt_key=False):
"""
Generate a URL to access this key.
:type expires_in: int
:param expires_in: How long the url is valid for, in seconds
:type method: string
:param method: The method to use for retrieving the file
(default is GET)
:type headers: dict
:param headers: Any headers to pass along in the request
:type query_auth: bool
:param query_auth:
:type force_http: bool
:param force_http: If True, http will be used instead of https.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type expires_in_absolute: bool
:param expires_in_absolute:
:type version_id: string
:param version_id: The version_id of the object to GET. If specified
this overrides any value in the key.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:rtype: string
:return: The URL to access the key
"""
provider = self.bucket.connection.provider
version_id = version_id or self.version_id
if headers is None:
headers = {}
else:
headers = headers.copy()
# add headers accordingly (usually PUT case)
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
return self.bucket.connection.generate_url(expires_in, method,
self.bucket.name, self.name,
headers, query_auth,
force_http,
response_headers,
expires_in_absolute,
version_id)
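    # Illustrative sketch: producing a time-limited, signed GET URL for a
    # private object. The expiry (seconds) and key name are placeholders.
    #
    #     key = bucket.get_key('private/report.pdf')
    #     url = key.generate_url(expires_in=3600)  # valid for one hour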
def send_file(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None):
"""
Upload a file to a key into a bucket on S3.
:type fp: file
:param fp: The file pointer to upload. The file pointer must
            point at the offset from which you wish to upload.
            i.e., if uploading the full file, it should point at the
start of the file. Normally when a file is opened for
reading, the fp will point at the first byte. See the
bytes parameter below for more info.
:type headers: dict
:param headers: The headers to pass along with the PUT request
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file
transfer. Providing a negative integer will cause your
callback to be called with each buffer read.
:type query_args: string
:param query_args: (optional) Arguments to pass in the query string.
:type chunked_transfer: boolean
:param chunked_transfer: (optional) If true, we use chunked
Transfer-Encoding.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
self._send_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
def _send_file_internal(self, fp, headers=None, cb=None, num_cb=10,
query_args=None, chunked_transfer=False, size=None,
hash_algs=None):
provider = self.bucket.connection.provider
try:
spos = fp.tell()
except IOError:
spos = None
self.read_from_stream = False
# If hash_algs is unset and the MD5 hasn't already been computed,
# default to an MD5 hash_alg to hash the data on-the-fly.
if hash_algs is None and not self.md5:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
def sender(http_conn, method, path, data, headers):
# This function is called repeatedly for temporary retries
# so we must be sure the file pointer is pointing at the
# start of the data.
if spos is not None and spos != fp.tell():
fp.seek(spos)
elif spos is None and self.read_from_stream:
# if seek is not supported, and we've read from this
# stream already, then we need to abort retries to
# avoid setting bad data.
raise provider.storage_data_error(
'Cannot retry failed request. fp does not support seeking.')
# If the caller explicitly specified host header, tell putrequest
# not to add a second host header. Similarly for accept-encoding.
skips = {}
if boto.utils.find_matching_headers('host', headers):
skips['skip_host'] = 1
if boto.utils.find_matching_headers('accept-encoding', headers):
skips['skip_accept_encoding'] = 1
http_conn.putrequest(method, path, **skips)
for key in headers:
http_conn.putheader(key, headers[key])
http_conn.endheaders()
save_debug = self.bucket.connection.debug
self.bucket.connection.debug = 0
# If the debuglevel < 4 we don't want to show connection
# payload, so turn off HTTP connection-level debug output (to
# be restored below).
# Use the getattr approach to allow this to work in AppEngine.
if getattr(http_conn, 'debuglevel', 0) < 4:
http_conn.set_debuglevel(0)
data_len = 0
if cb:
if size:
cb_size = size
elif self.size:
cb_size = self.size
else:
cb_size = 0
if chunked_transfer and cb_size == 0:
# For chunked Transfer, we call the cb for every 1MB
# of data transferred, except when we know size.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(
math.ceil(cb_size / self.BufferSize / (num_cb - 1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
bytes_togo = size
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
if spos is None:
# read at least something from a non-seekable fp.
self.read_from_stream = True
while chunk:
chunk_len = len(chunk)
data_len += chunk_len
if chunked_transfer:
http_conn.send('%x;\r\n' % chunk_len)
http_conn.send(chunk)
http_conn.send('\r\n')
else:
http_conn.send(chunk)
for alg in digesters:
digesters[alg].update(chunk)
if bytes_togo:
bytes_togo -= chunk_len
if bytes_togo <= 0:
break
if cb:
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
if bytes_togo and bytes_togo < self.BufferSize:
chunk = fp.read(bytes_togo)
else:
chunk = fp.read(self.BufferSize)
if not isinstance(chunk, bytes):
chunk = chunk.encode('utf-8')
self.size = data_len
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if chunked_transfer:
http_conn.send('0\r\n')
# http_conn.send("Content-MD5: %s\r\n" % self.base64md5)
http_conn.send('\r\n')
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
http_conn.set_debuglevel(save_debug)
self.bucket.connection.debug = save_debug
response = http_conn.getresponse()
body = response.read()
if not self.should_retry(response, chunked_transfer):
raise provider.storage_response_error(
response.status, response.reason, body)
return response
if not headers:
headers = {}
else:
headers = headers.copy()
# Overwrite user-supplied user-agent.
for header in find_matching_headers('User-Agent', headers):
del headers[header]
headers['User-Agent'] = UserAgent
# If storage_class is None, then a user has not explicitly requested
# a storage class, so we can assume STANDARD here
if self._storage_class not in [None, 'STANDARD']:
headers[provider.storage_class_header] = self.storage_class
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name(
'Content-Encoding', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name(
'Content-Language', headers)
content_type_headers = find_matching_headers('Content-Type', headers)
if content_type_headers:
# Some use cases need to suppress sending of the Content-Type
# header and depend on the receiving server to set the content
# type. This can be achieved by setting headers['Content-Type']
# to None when calling this method.
if (len(content_type_headers) == 1 and
headers[content_type_headers[0]] is None):
# Delete null Content-Type value to skip sending that header.
del headers[content_type_headers[0]]
else:
self.content_type = merge_headers_by_name(
'Content-Type', headers)
elif self.path:
self.content_type = mimetypes.guess_type(self.path)[0]
if self.content_type is None:
self.content_type = self.DefaultContentType
headers['Content-Type'] = self.content_type
else:
headers['Content-Type'] = self.content_type
if self.base64md5:
headers['Content-MD5'] = self.base64md5
if chunked_transfer:
headers['Transfer-Encoding'] = 'chunked'
#if not self.base64md5:
# headers['Trailer'] = "Content-MD5"
else:
headers['Content-Length'] = str(self.size)
# This is terrible. We need a SHA256 of the body for SigV4, but to do
# the chunked ``sender`` behavior above, the ``fp`` isn't available to
# the auth mechanism (because closures). Detect if it's SigV4 & embelish
# while we can before the auth calculations occur.
if 'hmac-v4-s3' in self.bucket.connection._required_auth_capability():
kwargs = {'fp': fp, 'hash_algorithm': hashlib.sha256}
if size is not None:
kwargs['size'] = size
headers['_sha256'] = compute_hash(**kwargs)[0]
headers['Expect'] = '100-Continue'
headers = boto.utils.merge_meta(headers, self.metadata, provider)
resp = self.bucket.connection.make_request(
'PUT',
self.bucket.name,
self.name,
headers,
sender=sender,
query_args=query_args
)
self.handle_version_headers(resp, force=True)
self.handle_addl_headers(resp.getheaders())
def should_retry(self, response, chunked_transfer=False):
provider = self.bucket.connection.provider
if not chunked_transfer:
if response.status in [500, 503]:
# 500 & 503 can be plain retries.
return True
if response.getheader('location'):
# If there's a redirect, plain retry.
return True
if 200 <= response.status <= 299:
self.etag = response.getheader('etag')
md5 = self.md5
if isinstance(md5, bytes):
md5 = md5.decode('utf-8')
# If you use customer-provided encryption keys, the ETag value that
# Amazon S3 returns in the response will not be the MD5 of the
# object.
server_side_encryption_customer_algorithm = response.getheader(
'x-amz-server-side-encryption-customer-algorithm', None)
if server_side_encryption_customer_algorithm is None:
if self.etag != '"%s"' % md5:
raise provider.storage_data_error(
'ETag from S3 did not match computed MD5. '
'%s vs. %s' % (self.etag, self.md5))
return True
if response.status == 400:
# The 400 must be trapped so the retry handler can check to
# see if it was a timeout.
# If ``RequestTimeout`` is present, we'll retry. Otherwise, bomb
# out.
body = response.read()
err = provider.storage_response_error(
response.status,
response.reason,
body
)
if err.error_code in ['RequestTimeout']:
raise PleaseRetryException(
"Saw %s, retrying" % err.error_code,
response=response
)
return False
def compute_md5(self, fp, size=None):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file
pointer will be reset to the same position before the
method returns.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where the file is being split
            in place into different parts. Fewer bytes may be available.
"""
hex_digest, b64_digest, data_size = compute_md5(fp, size=size)
# Returned values are MD5 hash, base64 encoded MD5 hash, and data size.
# The internal implementation of compute_md5() needs to return the
# data size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = data_size
return (hex_digest, b64_digest)
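    # Illustrative sketch: computing the MD5 once and passing the resulting
    # (hexdigest, base64) tuple to set_contents_from_file so the data is not
    # hashed a second time. The file and key names are placeholders.
    #
    #     with open('large.bin', 'rb') as fp:
    #         key = bucket.new_key('large.bin')
    #         md5 = key.compute_md5(fp)
    #         key.set_contents_from_file(fp, md5=md5)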
def set_contents_from_stream(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None,
reduced_redundancy=False, query_args=None,
size=None):
"""
Store an object using the name of the Key object as the key in
        the cloud and the contents of the data stream pointed to by 'fp' as
the contents.
The stream object is not seekable and total size is not known.
This has the implication that we can't specify the
        Content-Length and Content-MD5 in the header. So for huge
uploads, the delay in calculating MD5 is avoided but with a
penalty of inability to verify the integrity of the uploaded
data.
:type fp: file
:param fp: the file whose contents are to be uploaded
:type headers: dict
:param headers: additional HTTP headers to be sent with the
PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will first check
to see if an object exists in the bucket with the same key. If it
does, it won't overwrite it. The default value is True which will
overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two integer
parameters, the first representing the number of bytes that have
been successfully transmitted to GS and the second representing the
total number of bytes that need to be transmitted.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter, this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.gs.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the new key
in GS.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type size: int
:param size: (optional) The Maximum number of bytes to read from
the file pointer (fp). This is useful when uploading a
file in multiple parts where you are splitting the file up
into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
"""
provider = self.bucket.connection.provider
if not provider.supports_chunked_transfer():
raise BotoClientError('%s does not support chunked transfer'
% provider.get_provider_name())
# Name of the Object should be specified explicitly for Streams.
if not self.name or self.name == '':
raise BotoClientError('Cannot determine the destination '
'object name for the given stream')
if headers is None:
headers = {}
if policy:
headers[provider.acl_header] = policy
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
if self.bucket is not None:
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers, cb, num_cb, query_args,
chunked_transfer=True, size=size)
def set_contents_from_file(self, fp, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False, query_args=None,
encrypt_key=False, size=None, rewind=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file pointed to by 'fp' as the
contents. The data is read from 'fp' from its current position until
'size' bytes have been read or EOF.
:type fp: file
:param fp: the file whose contents to upload
:type headers: dict
:param headers: Additional HTTP headers that will be sent with
the PUT request.
:type replace: bool
:param replace: If this parameter is False, the method will
first check to see if an object exists in the bucket with
the same key. If it does, it won't overwrite it. The
default value is True which will overwrite the object.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method. If you need to compute the MD5 for any
            reason prior to upload, it's silly to have to do it twice so
            this param, if present, will be used as the MD5 values of the
            file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
:type size: int
:param size: (optional) The Maximum number of bytes to read
from the file pointer (fp). This is useful when uploading
a file in multiple parts where you are splitting the file
up into different ranges to be uploaded. If not specified,
the default behaviour is to read all bytes from the file
            pointer. Fewer bytes may be available.
:type rewind: bool
:param rewind: (optional) If True, the file pointer (fp) will
be rewound to the start before any bytes are read from
it. The default behaviour is False which reads from the
current position of the file pointer (fp).
:rtype: int
:return: The number of bytes written to the key.
"""
provider = self.bucket.connection.provider
headers = headers or {}
if policy:
headers[provider.acl_header] = policy
if encrypt_key:
headers[provider.server_side_encryption_header] = 'AES256'
if rewind:
# caller requests reading from beginning of fp.
fp.seek(0, os.SEEK_SET)
else:
# The following seek/tell/seek logic is intended
# to detect applications using the older interface to
# set_contents_from_file(), which automatically rewound the
# file each time the Key was reused. This changed with commit
# 14ee2d03f4665fe20d19a85286f78d39d924237e, to support uploads
# split into multiple parts and uploaded in parallel, and at
# the time of that commit this check was added because otherwise
# older programs would get a success status and upload an empty
            # object. Unfortunately, it's very inefficient for fp's implemented
# by KeyFile (used, for example, by gsutil when copying between
# providers). So, we skip the check for the KeyFile case.
# TODO: At some point consider removing this seek/tell/seek
# logic, after enough time has passed that it's unlikely any
# programs remain that assume the older auto-rewind interface.
if not isinstance(fp, KeyFile):
spos = fp.tell()
fp.seek(0, os.SEEK_END)
if fp.tell() == spos:
fp.seek(0, os.SEEK_SET)
if fp.tell() != spos:
# Raise an exception as this is likely a programming
# error whereby there is data before the fp but nothing
# after it.
fp.seek(spos)
raise AttributeError('fp is at EOF. Use rewind option '
'or seek() to data start.')
# seek back to the correct position.
fp.seek(spos)
if reduced_redundancy:
self.storage_class = 'REDUCED_REDUNDANCY'
if provider.storage_class_header:
headers[provider.storage_class_header] = self.storage_class
                # TODO - What if provider doesn't support reduced redundancy?
# What if different providers provide different classes?
if hasattr(fp, 'name'):
self.path = fp.name
if self.bucket is not None:
if not md5 and provider.supports_chunked_transfer():
# defer md5 calculation to on the fly and
# we don't know anything about size yet.
chunked_transfer = True
self.size = None
else:
chunked_transfer = False
if isinstance(fp, KeyFile):
# Avoid EOF seek for KeyFile case as it's very inefficient.
key = fp.getkey()
size = key.size - fp.tell()
self.size = size
# At present both GCS and S3 use MD5 for the etag for
# non-multipart-uploaded objects. If the etag is 32 hex
# chars use it as an MD5, to avoid having to read the file
# twice while transferring.
if (re.match('^"[a-fA-F0-9]{32}"$', key.etag)):
etag = key.etag.strip('"')
md5 = (etag, base64.b64encode(binascii.unhexlify(etag)))
if not md5:
# compute_md5() and also set self.size to actual
# size of the bytes read computing the md5.
md5 = self.compute_md5(fp, size)
# adjust size if required
size = self.size
elif size:
self.size = size
else:
                # If md5 is provided, we still need the size, so
                # calculate it based on bytes to end of content.
spos = fp.tell()
fp.seek(0, os.SEEK_END)
self.size = fp.tell() - spos
fp.seek(spos)
size = self.size
self.md5 = md5[0]
self.base64md5 = md5[1]
if self.name is None:
self.name = self.md5
if not replace:
if self.bucket.lookup(self.name):
return
self.send_file(fp, headers=headers, cb=cb, num_cb=num_cb,
query_args=query_args,
chunked_transfer=chunked_transfer, size=size)
# return number of bytes written.
return self.size
def set_contents_from_filename(self, filename, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the contents of the file named by 'filename'.
See set_contents_from_file method for details about the
parameters.
:type filename: string
:param filename: The name of the file that you want to put onto S3
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file
if it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method. If you need to compute the MD5 for any
            reason prior to upload, it's silly to have to do it twice so
            this param, if present, will be used as the MD5 values of the
            file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
            redundancy at lower storage cost.
        :type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object
will be encrypted on the server-side by S3 and will be
stored in an encrypted form while at rest in S3.
:rtype: int
:return: The number of bytes written to the key.
"""
with open(filename, 'rb') as fp:
return self.set_contents_from_file(fp, headers, replace, cb,
num_cb, policy, md5,
reduced_redundancy,
encrypt_key=encrypt_key)
def set_contents_from_string(self, string_data, headers=None, replace=True,
cb=None, num_cb=10, policy=None, md5=None,
reduced_redundancy=False,
encrypt_key=False):
"""
Store an object in S3 using the name of the Key object as the
key in S3 and the string 's' as the contents.
See set_contents_from_file method for details about the
parameters.
:type headers: dict
:param headers: Additional headers to pass along with the
request to AWS.
:type replace: bool
:param replace: If True, replaces the contents of the file if
it already exists.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3.
        :type md5: tuple
        :param md5: A tuple containing the hexdigest version of the MD5
            checksum of the file as the first element and the
            Base64-encoded version of the plain checksum as the second
            element. This is the same format returned by the
            compute_md5 method. If you need to compute the MD5 for any
            reason prior to upload, it's silly to have to do it twice so
            this param, if present, will be used as the MD5 values of the
            file. Otherwise, the checksum will be computed.
:type reduced_redundancy: bool
:param reduced_redundancy: If True, this will set the storage
class of the new Key to be REDUCED_REDUNDANCY. The Reduced
Redundancy Storage (RRS) feature of S3, provides lower
redundancy at lower storage cost.
:type encrypt_key: bool
:param encrypt_key: If True, the new copy of the object will
be encrypted on the server-side by S3 and will be stored
in an encrypted form while at rest in S3.
"""
if not isinstance(string_data, bytes):
string_data = string_data.encode("utf-8")
fp = BytesIO(string_data)
r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, reduced_redundancy,
encrypt_key=encrypt_key)
fp.close()
return r
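    # Illustrative sketch: writing a small in-memory payload. The key name
    # is a placeholder; 'public-read' is a standard canned ACL string.
    #
    #     key = bucket.new_key('notes/hello.txt')
    #     key.set_contents_from_string('hello world', policy='public-read')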
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
        :type headers: dict
        :param headers: headers to send when retrieving the files
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
self._get_file_internal(fp, headers=headers, cb=cb, num_cb=num_cb,
torrent=torrent, version_id=version_id,
override_num_retries=override_num_retries,
response_headers=response_headers,
hash_algs=None,
query_args=None)
def _get_file_internal(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, hash_algs=None, query_args=None):
if headers is None:
headers = {}
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = query_args or []
if torrent:
query_args.append('torrent')
if hash_algs is None and not torrent:
hash_algs = {'md5': md5}
digesters = dict((alg, hash_algs[alg]()) for alg in hash_algs or {})
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (
key, urllib.parse.quote(response_headers[key])))
query_args = '&'.join(query_args)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries)
data_len = 0
if cb:
if self.size is None:
cb_size = 0
else:
cb_size = self.size
if self.size is None and num_cb != -1:
# If size is not available due to chunked transfer for example,
# we'll call the cb for every 1MB of data transferred.
cb_count = (1024 * 1024) / self.BufferSize
elif num_cb > 1:
cb_count = int(math.ceil(cb_size/self.BufferSize/(num_cb-1.0)))
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = 0
cb(data_len, cb_size)
try:
for bytes in self:
fp.write(bytes)
data_len += len(bytes)
for alg in digesters:
digesters[alg].update(bytes)
if cb:
if cb_size > 0 and data_len >= cb_size:
break
i += 1
if i == cb_count or cb_count == -1:
cb(data_len, cb_size)
i = 0
except IOError as e:
if e.errno == errno.ENOSPC:
raise StorageDataError('Out of space for destination file '
'%s' % fp.name)
raise
if cb and (cb_count <= 1 or i > 0) and data_len > 0:
cb(data_len, cb_size)
for alg in digesters:
self.local_hashes[alg] = digesters[alg].digest()
if self.size is None and not torrent and "Range" not in headers:
self.size = data_len
self.close()
self.bucket.connection.debug = save_debug
def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
"""
        Get a torrent file (see get_file)
:type fp: file
:param fp: The file pointer of where to put the torrent
:type headers: dict
:param headers: Headers to be passed
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
"""
return self.get_file(fp, headers, cb, num_cb, torrent=True)
def get_contents_to_file(self, fp, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Write the contents of the object to the file pointed
to by 'fp'.
        :type fp: file
        :param fp: a file-like object to write the object's contents to
:type headers: dict
:param headers: additional HTTP headers that will be sent with
the GET request.
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent
file as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
if self.bucket is not None:
if res_download_handler:
res_download_handler.get_file(self, fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id)
else:
self.get_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
def get_contents_to_filename(self, filename, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
res_download_handler=None,
response_headers=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Store contents of the object to a file named by 'filename'.
See get_contents_to_file method for details about the
parameters.
:type filename: string
:param filename: The filename of where to put the file contents
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
        :type res_download_handler: ResumableDownloadHandler
:param res_download_handler: If provided, this handler will
perform the download.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
"""
try:
with open(filename, 'wb') as fp:
self.get_contents_to_file(fp, headers, cb, num_cb,
torrent=torrent,
version_id=version_id,
res_download_handler=res_download_handler,
response_headers=response_headers)
except Exception:
os.remove(filename)
raise
# if last_modified date was sent from s3, try to set file's timestamp
if self.last_modified is not None:
try:
modified_tuple = email.utils.parsedate_tz(self.last_modified)
modified_stamp = int(email.utils.mktime_tz(modified_tuple))
os.utime(fp.name, (modified_stamp, modified_stamp))
except Exception:
pass
def get_contents_as_string(self, headers=None,
cb=None, num_cb=10,
torrent=False,
version_id=None,
response_headers=None, encoding=None):
"""
Retrieve an object from S3 using the name of the Key object as the
key in S3. Return the contents of the object as a string.
See get_contents_to_file method for details about the
parameters.
:type headers: dict
:param headers: Any additional headers to send in the request
:type cb: function
:param cb: a callback function that will be called to report
progress on the upload. The callback should accept two
integer parameters, the first representing the number of
bytes that have been successfully transmitted to S3 and
the second representing the size of the to be transmitted
object.
        :type num_cb: int
:param num_cb: (optional) If a callback is specified with the
cb parameter this parameter determines the granularity of
the callback by defining the maximum number of times the
callback will be called during the file transfer.
:type torrent: bool
:param torrent: If True, returns the contents of a torrent file
as a string.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP
headers/values that will override any headers associated
with the stored object in the response. See
http://goo.gl/EWOPb for details.
:type version_id: str
:param version_id: The ID of a particular version of the object.
If this parameter is not supplied but the Key object has
a ``version_id`` attribute, that value will be used when
retrieving the object. You can set the Key object's
``version_id`` attribute to None to always grab the latest
version from a version-enabled bucket.
:type encoding: str
:param encoding: The text encoding to use, such as ``utf-8``
or ``iso-8859-1``. If set, then a string will be returned.
Defaults to ``None`` and returns bytes.
:rtype: bytes or str
:returns: The contents of the file as bytes or a string
"""
fp = BytesIO()
self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
version_id=version_id,
response_headers=response_headers)
value = fp.getvalue()
if encoding is not None:
value = value.decode(encoding)
return value
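    # Illustrative sketch: reading an object back, either into a local file
    # or into memory. Names are placeholders; passing 'encoding' decodes the
    # returned bytes into a string.
    #
    #     key = bucket.get_key('notes/hello.txt')
    #     key.get_contents_to_filename('/tmp/hello.txt')
    #     text = key.get_contents_as_string(encoding='utf-8')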
def add_email_grant(self, permission, email_address, headers=None):
"""
Convenience method that provides a quick way to add an email grant
to a key. This method retrieves the current ACL, creates a new
grant based on the parameters passed in, adds that grant to the ACL
and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type email_address: string
:param email_address: The email address associated with the AWS
            account you are granting the permission to.
:type recursive: boolean
        :param recursive: A boolean value that controls whether the
command will apply the grant to all keys within the bucket
or not. The default value is False. By passing a True
value, the call will iterate through all keys in the
bucket and apply the same grant to each key. CAUTION: If
you have a lot of keys, this could take a long time!
"""
policy = self.get_acl(headers=headers)
policy.acl.add_email_grant(permission, email_address)
self.set_acl(policy, headers=headers)
def add_user_grant(self, permission, user_id, headers=None,
display_name=None):
"""
Convenience method that provides a quick way to add a canonical
user grant to a key. This method retrieves the current ACL,
creates a new grant based on the parameters passed in, adds that
grant to the ACL and then PUT's the new ACL back to S3.
:type permission: string
:param permission: The permission being granted. Should be one of:
(READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
:type user_id: string
:param user_id: The canonical user id associated with the AWS
            account you are granting the permission to.
:type display_name: string
        :param display_name: An optional string containing the user's
Display Name. Only required on Walrus.
"""
policy = self.get_acl(headers=headers)
policy.acl.add_user_grant(permission, user_id,
display_name=display_name)
self.set_acl(policy, headers=headers)
def _normalize_metadata(self, metadata):
if type(metadata) == set:
norm_metadata = set()
for k in metadata:
norm_metadata.add(k.lower())
else:
norm_metadata = {}
for k in metadata:
norm_metadata[k.lower()] = metadata[k]
return norm_metadata
def _get_remote_metadata(self, headers=None):
"""
Extracts metadata from existing URI into a dict, so we can
overwrite/delete from it to form the new set of metadata to apply to a
key.
"""
metadata = {}
for underscore_name in self._underscore_base_user_settable_fields:
if hasattr(self, underscore_name):
value = getattr(self, underscore_name)
if value:
# Generate HTTP field name corresponding to "_" named field.
field_name = underscore_name.replace('_', '-')
metadata[field_name.lower()] = value
# self.metadata contains custom metadata, which are all user-settable.
prefix = self.provider.metadata_prefix
for underscore_name in self.metadata:
field_name = underscore_name.replace('_', '-')
metadata['%s%s' % (prefix, field_name.lower())] = (
self.metadata[underscore_name])
return metadata
def set_remote_metadata(self, metadata_plus, metadata_minus, preserve_acl,
headers=None):
metadata_plus = self._normalize_metadata(metadata_plus)
metadata_minus = self._normalize_metadata(metadata_minus)
metadata = self._get_remote_metadata()
metadata.update(metadata_plus)
for h in metadata_minus:
if h in metadata:
del metadata[h]
src_bucket = self.bucket
# Boto prepends the meta prefix when adding headers, so strip prefix in
# metadata before sending back in to copy_key() call.
rewritten_metadata = {}
for h in metadata:
if (h.startswith('x-goog-meta-') or h.startswith('x-amz-meta-')):
rewritten_h = (h.replace('x-goog-meta-', '')
.replace('x-amz-meta-', ''))
else:
rewritten_h = h
rewritten_metadata[rewritten_h] = metadata[h]
metadata = rewritten_metadata
src_bucket.copy_key(self.name, self.bucket.name, self.name,
metadata=metadata, preserve_acl=preserve_acl,
headers=headers)
def restore(self, days, headers=None):
"""Restore an object from an archive.
:type days: int
:param days: The lifetime of the restored object (must
be at least 1 day). If the object is already restored
then this parameter can be used to readjust the lifetime
of the restored object. In this case, the days
param is with respect to the initial time of the request.
If the object has not been restored, this param is with
respect to the completion time of the request.
"""
response = self.bucket.connection.make_request(
'POST', self.bucket.name, self.name,
data=self.RestoreBody % days,
headers=headers, query_args='restore')
if response.status not in (200, 202):
provider = self.bucket.connection.provider
raise provider.storage_response_error(response.status,
response.reason,
response.read())
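# Hedged usage sketch (illustrative only, not part of this module): the download
# and grant helpers above in action, assuming valid AWS credentials and an
# existing bucket/key; the bucket and key names below are placeholders.
#
#   import boto
#   conn = boto.connect_s3()
#   bucket = conn.get_bucket('example-bucket')
#   key = bucket.get_key('example-object')
#   key.get_contents_to_filename('/tmp/example-object')    # download to a file
#   text = key.get_contents_as_string(encoding='utf-8')    # download as a str
#   key.add_email_grant('READ', 'user@example.com')        # ACL grant by email
#   key.restore(days=7)                                     # thaw an archived object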
|
doumadou/ssbc
|
refs/heads/master
|
search/migrations/0004_auto_20150511_0339.py
|
36
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('search', '0003_auto_20150511_0316'),
]
operations = [
migrations.AlterField(
model_name='hash',
name='tagged',
field=models.BooleanField(default=False, db_index=True),
),
]
|
MatthewWilkes/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/1_auto.py
|
1155
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
windskyer/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/294_add_service_heartbeat.py
|
72
|
# Copyright (c) 2015 Wind River Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table, Column, DateTime
BASE_TABLE_NAME = 'services'
NEW_COLUMN_NAME = 'last_seen_up'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, DateTime, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
|
gbn972/hug
|
refs/heads/develop
|
tests/test_decorators.py
|
6
|
"""tests/test_decorators.py.
Tests the decorators that power hugs core functionality
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from falcon.testing import StartResponseMock, create_environ
import sys
import hug
import pytest
api = sys.modules[__name__]
def test_basic_call():
'''The most basic Happy-Path test for Hug APIs'''
@hug.call()
def hello_world():
return "Hello World!"
assert hello_world() == "Hello World!"
assert hello_world.interface
assert hug.test.get(api, '/hello_world').data == "Hello World!"
def test_single_parameter():
'''Test that an api with a single parameter interacts as desired'''
@hug.call()
def echo(text):
return text
assert echo('Embrace') == 'Embrace'
assert echo.interface
with pytest.raises(TypeError):
echo()
assert hug.test.get(api, 'echo', text="Hello").data == "Hello"
assert 'required' in hug.test.get(api, '/echo').data['errors']['text'].lower()
def test_custom_url():
'''Test to ensure that it's possible to have a route that differs from the function name'''
@hug.call('/custom_route')
def method_name():
return 'works'
assert hug.test.get(api, 'custom_route').data == 'works'
def test_api_auto_initiate():
'''Test to ensure that Hug automatically exposes a wsgi server method'''
assert isinstance(__hug_wsgi__(create_environ('/non_existant'), StartResponseMock()), (list, tuple))
def test_parameters():
'''Tests to ensure that Hug can easily handle multiple parameters with multiple types'''
@hug.call()
def multiple_parameter_types(start, middle:hug.types.text, end:hug.types.number=5, **kwargs):
return 'success'
assert hug.test.get(api, 'multiple_parameter_types', start='start', middle='middle', end=7).data == 'success'
assert hug.test.get(api, 'multiple_parameter_types', start='start', middle='middle').data == 'success'
assert hug.test.get(api, 'multiple_parameter_types', start='start', middle='middle', other="yo").data == 'success'
nan_test = hug.test.get(api, 'multiple_parameter_types', start='start', middle='middle', end='NAN').data
assert 'invalid' in nan_test['errors']['end']
def test_parameter_injection():
'''Tests that hug correctly auto injects variables such as request and response'''
@hug.call()
def inject_request(request):
return request and 'success'
assert hug.test.get(api, 'inject_request').data == 'success'
@hug.call()
def inject_response(response):
return response and 'success'
assert hug.test.get(api, 'inject_response').data == 'success'
@hug.call()
def inject_both(request, response):
return request and response and 'success'
assert hug.test.get(api, 'inject_both').data == 'success'
@hug.call()
def wont_appear_in_kwargs(**kwargs):
return 'request' not in kwargs and 'response' not in kwargs and 'success'
assert hug.test.get(api, 'wont_appear_in_kwargs').data == 'success'
def test_method_routing():
'''Test that all hugs HTTP routers correctly route methods to the correct handler'''
@hug.get()
def method():
return 'GET'
@hug.post()
def method():
return 'POST'
@hug.connect()
def method():
return 'CONNECT'
@hug.delete()
def method():
return 'DELETE'
@hug.options()
def method():
return 'OPTIONS'
@hug.put()
def method():
return 'PUT'
@hug.trace()
def method():
return 'TRACE'
assert hug.test.get(api, 'method').data == 'GET'
assert hug.test.post(api, 'method').data == 'POST'
assert hug.test.connect(api, 'method').data == 'CONNECT'
assert hug.test.delete(api, 'method').data == 'DELETE'
assert hug.test.options(api, 'method').data == 'OPTIONS'
assert hug.test.put(api, 'method').data == 'PUT'
assert hug.test.trace(api, 'method').data == 'TRACE'
@hug.call(accept=('GET', 'POST'))
def accepts_get_and_post():
return 'success'
assert hug.test.get(api, 'accepts_get_and_post').data == 'success'
assert hug.test.post(api, 'accepts_get_and_post').data == 'success'
assert 'method not allowed' in hug.test.trace(api, 'accepts_get_and_post').status.lower()
def test_versioning():
'''Ensure that Hug correctly routes API functions based on version'''
@hug.get('/echo')
def echo(text):
return "Not Implemented"
@hug.get('/echo', versions=1)
def echo(text):
return text
@hug.get('/echo', versions=range(2, 4))
def echo(text):
return "Echo: {text}".format(**locals())
@hug.get('/echo', versions=7)
def echo(text, api_version):
return api_version
assert hug.test.get(api, 'v1/echo', text="hi").data == 'hi'
assert hug.test.get(api, 'v2/echo', text="hi").data == "Echo: hi"
assert hug.test.get(api, 'v3/echo', text="hi").data == "Echo: hi"
assert hug.test.get(api, 'echo', text="hi", api_version=3).data == "Echo: hi"
assert hug.test.get(api, 'echo', text="hi", headers={'X-API-VERSION': '3'}).data == "Echo: hi"
assert hug.test.get(api, 'v4/echo', text="hi").data == "Not Implemented"
assert hug.test.get(api, 'v7/echo', text="hi").data == 7
assert hug.test.get(api, 'echo', text="hi").data == "Not Implemented"
assert hug.test.get(api, 'echo', text="hi", api_version=3, body={'api_vertion': 4}).data == "Echo: hi"
with pytest.raises(ValueError):
hug.test.get(api, 'v4/echo', text="hi", api_version=3)
def test_multiple_version_injection():
'''Test to ensure that the version injected sticks when calling other functions within an API'''
@hug.get(versions=(1, 2, None))
def my_api_function(hug_api_version):
return hug_api_version
assert hug.test.get(api, 'v1/my_api_function').data == 1
assert hug.test.get(api, 'v2/my_api_function').data == 2
assert hug.test.get(api, 'v3/my_api_function').data == 3
@hug.get(versions=(None, 1))
def call_other_function(hug_current_api):
return hug_current_api.my_api_function()
assert hug.test.get(api, 'v1/call_other_function').data == 1
assert call_other_function() == 1
@hug.get(versions=1)
def one_more_level_of_indirection(hug_current_api):
return hug_current_api.call_other_function()
assert hug.test.get(api, 'v1/one_more_level_of_indirection').data == 1
assert one_more_level_of_indirection() == 1
def test_json_auto_convert():
'''Test to ensure all types of data correctly auto convert into json'''
@hug.get('/test_json')
def test_json(text):
return text
assert hug.test.get(api, 'test_json', body={'text': 'value'}).data == "value"
@hug.get('/test_json_body')
def test_json_body(body):
return body
assert hug.test.get(api, 'test_json_body', body=['value1', 'value2']).data == ['value1', 'value2']
@hug.get(parse_body=False)
def test_json_body_stream_only(body=None):
return body
assert hug.test.get(api, 'test_json_body_stream_only', body=['value1', 'value2']).data == None
def test_output_format():
'''Test to ensure it's possible to quickly change the default hug output format'''
old_formatter = api.__hug__.output_format
@hug.default_output_format()
def augmented(data):
return hug.output_format.json(['Augmented', data])
@hug.get()
def hello():
return "world"
assert hug.test.get(api, 'hello').data == ['Augmented', 'world']
@hug.default_output_format()
def jsonify(data):
return hug.output_format.json(data)
api.__hug__.output_format = hug.output_format.text
@hug.get()
def my_method():
return {'Should': 'work'}
assert hug.test.get(api, 'my_method').data == "{'Should': 'work'}"
api.__hug__.output_format = old_formatter
def test_input_format():
    '''Test to ensure it's possible to quickly change the default hug input format'''
old_format = api.__hug__.input_format('application/json')
api.__hug__.set_input_format('application/json', lambda a: {'no': 'relation'})
@hug.get()
def hello(body):
return body
assert hug.test.get(api, 'hello', body={'should': 'work'}).data == {'no': 'relation'}
api.__hug__.set_input_format('application/json', old_format)
def test_middleware():
'''Test to ensure the basic concept of a middleware works as expected'''
@hug.request_middleware()
    def process_data(request, response):
request.env['SERVER_NAME'] = 'Bacon'
@hug.response_middleware()
    def process_data(request, response, resource):
response.set_header('Bacon', 'Yumm')
@hug.get()
def hello(request):
return request.env['SERVER_NAME']
result = hug.test.get(api, 'hello')
assert result.data == 'Bacon'
assert result.headers_dict['Bacon'] == 'Yumm'
def test_extending_api():
'''Test to ensure it's possible to extend the current API from an external file'''
@hug.extend_api('/fake')
def extend_with():
import tests.module_fake
return (tests.module_fake, )
assert hug.test.get(api, 'fake/made_up_api').data == True
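# Hedged sketch (not one of the original tests): a minimal standalone hug module
# that the routing and versioning behaviour exercised above would apply to.
# The endpoint name here is made up.
#
#   import hug
#
#   @hug.get('/greet', versions=1)          # reachable as /v1/greet
#   def greet(name: hug.types.text):
#       return "Hello {0}!".format(name)
#
# hug exposes the module as a WSGI callable named __hug_wsgi__, so (assumption:
# any WSGI server works) it can be served with e.g. `gunicorn mymodule:__hug_wsgi__`.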
|
EvanK/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_compute_health_check_facts.py
|
9
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_health_check_facts
description:
- Gather facts for GCP HealthCheck
short_description: Gather facts for GCP HealthCheck
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
    - Each additional filter in the list will be added as an AND condition (filter1
      and filter2).
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: a health check facts
gcp_compute_health_check_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
checkIntervalSec:
description:
- How often (in seconds) to send a health check. The default value is 5 seconds.
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
healthyThreshold:
description:
- A so-far unhealthy instance will be marked healthy after this many consecutive
successes. The default value is 2.
returned: success
type: int
id:
description:
- The unique identifier for the resource. This identifier is defined by the
server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
timeoutSec:
description:
- How long (in seconds) to wait before claiming failure.
- The default value is 5 seconds. It is invalid for timeoutSec to have greater
value than checkIntervalSec.
returned: success
type: int
unhealthyThreshold:
description:
- A so-far healthy instance will be marked unhealthy after this many consecutive
failures. The default value is 2.
returned: success
type: int
type:
description:
      - Specifies the type of the healthCheck, either TCP, SSL, HTTP or HTTPS.
        If not specified, the default is TCP. Exactly one of the protocol-specific
        health check fields must be specified, and it must match the type field.
returned: success
type: str
httpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTP health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTP health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTP health check request.
- The default value is 80.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
httpsHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
host:
description:
- The value of the host header in the HTTPS health check request.
- If left empty (default value), the public IP on behalf of which this health
check is performed will be used.
returned: success
type: str
requestPath:
description:
- The request path of the HTTPS health check request.
- The default value is /.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the HTTPS health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
tcpHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the TCP connection has been established
(default value is empty). If both request and response are empty, the
connection establishment alone will indicate health. The request data
can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the TCP health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
sslHealthCheck:
description:
- A nested object resource.
returned: success
type: complex
contains:
request:
description:
- The application data to send once the SSL connection has been established
(default value is empty). If both request and response are empty, the
connection establishment alone will indicate health. The request data
can only be ASCII.
returned: success
type: str
response:
description:
- The bytes to match against the beginning of the response data. If left
empty (the default value), any response will indicate health. The response
data can only be ASCII.
returned: success
type: str
port:
description:
- The TCP port number for the SSL health check request.
- The default value is 443.
returned: success
type: int
portName:
description:
- Port name as defined in InstanceGroup#NamedPort#name. If both port and
port_name are defined, port takes precedence.
returned: success
type: str
proxyHeader:
description:
- Specifies the type of proxy header to append before sending data to the
backend, either NONE or PROXY_V1. The default is NONE.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
items = fetch_list(module, collection(module), query_options(module.params['filters']))
if items.get('items'):
items = items.get('items')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/global/healthChecks".format(**module.params)
def fetch_list(module, link, query):
auth = GcpSession(module, 'compute')
response = auth.get(link, params={'filter': query})
return return_if_object(module, response)
def query_options(filters):
if not filters:
return ''
if len(filters) == 1:
return filters[0]
else:
queries = []
for f in filters:
# For multiple queries, all queries should have ()
if f[0] != '(' and f[-1] != ')':
queries.append("(%s)" % ''.join(f))
else:
queries.append(f)
return ' '.join(queries)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
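# Illustrative note (not part of the generated module): query_options() joins
# multiple filters so the GCP API treats them as ANDed conditions, e.g.
#
#   query_options(['name = test_object'])
#       -> 'name = test_object'
#   query_options(['name = a', 'zone = us-central1-a'])
#       -> '(name = a) (zone = us-central1-a)'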
|
NMGRL/pychron
|
refs/heads/develop
|
pychron/experiment/automated_run/result.py
|
2
|
# ===============================================================================
# Copyright 2016 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Str, Property, Instance
# ============= standard library imports ========================
# ============= local library imports ==========================
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.formatting import uformat_percent_error, floatfmt, errorfmt
from pychron.experiment.conditional.conditional import AutomatedRunConditional
from pychron.processing.isotope_group import IsotopeGroup
from pychron.pychron_constants import NULL_STR
class AutomatedRunResult(HasTraits):
runid = Str
analysis_timestamp = None
isotope_group = Instance(IsotopeGroup)
summary = Property
tripped_conditional = Instance(AutomatedRunConditional)
centering_results = None
def _get_summary(self):
lines = ['RunID= {}'.format(self.runid)]
at = self.analysis_timestamp
if at is not None:
lines.append('Run Time= {}'.format(at.strftime('%H:%M:%S %m-%d-%Y')))
funcs = [self._intensities, self._tripped_conditional,
lambda: self._make_header('Summary'),
self._make_peak_statistics,
self._make_summary]
for func in funcs:
try:
lines.append(func())
except BaseException:
pass
return '\n'.join(lines)
def _make_summary(self):
return 'No Summary Available'
def _make_peak_statistics(self):
ret = ''
def f(v):
try:
v = '{:0.2f}'.format(v)
except (ValueError, TypeError):
v = NULL_STR
return v
if self.centering_results:
fmt = '{:<10s} {:<10s} {:<10s} {:<10s}'
s = [fmt.format('Det', 'Res.', 'Low RP', 'High RP')]
for r in self.centering_results:
s.append(fmt.format(r.detector,
f(r.resolution),
f(r.low_resolving_power),
f(r.high_resolving_power)))
ret = '\n'.join(s)
return ret
def _intensities(self):
lines = []
if self.isotope_group:
def fformat(s, n=5):
return '{{:0.{}f}}'.format(n).format(s)
names = 'Iso.', 'Det.', 'Intensity (fA)', '%Err', 'Intercept (fA)', '%Err', 'Baseline (fA)', '%Err', \
'Blank (fA)', '%Err'
colwidths = 6, 8, 25, 8, 25, 8, 25, 8, 25, 8
# cols = list(map('{{:<{}s}}'.format, colwidths))
cols = ['{{:<{}s}}'.format(ci) for ci in colwidths]
colstr = ''.join(cols)
divider = ''.join(['{} '.format('-' * (x - 1)) for x in colwidths])
table_header = colstr.format(*names)
lines = [self._make_header('Isotopes'), table_header, divider]
for k in self.isotope_group.isotope_keys:
iso = self.isotope_group.isotopes[k]
intensity = iso.get_intensity()
line = colstr.format(k, iso.detector,
fformat(intensity), uformat_percent_error(intensity),
fformat(iso.uvalue), uformat_percent_error(iso.uvalue),
fformat(iso.baseline.uvalue), uformat_percent_error(iso.baseline.uvalue),
fformat(iso.blank.uvalue), uformat_percent_error(iso.blank.uvalue), )
lines.append(line)
return self._make_lines(lines)
def _air_ratio(self):
a4038 = self.isotope_group.get_ratio('Ar40/Ar38', non_ic_corr=True)
a4036 = self.isotope_group.get_ratio('Ar40/Ar36', non_ic_corr=True)
# e4038 = uformat_percent_error(a4038, include_percent_sign=True)
# e4036 = uformat_percent_error(a4036, include_percent_sign=True)
lines = [self._make_header('Ratios'),
'Ar40/Ar36= {} {}'.format(floatfmt(nominal_value(a4036)), errorfmt(nominal_value(a4036),
std_dev(a4036))),
'Ar40/Ar38= {} {}'.format(floatfmt(nominal_value(a4038)), errorfmt(nominal_value(a4038),
std_dev(a4038)))]
return self._make_lines(lines)
def _tripped_conditional(self):
ret = ''
if self.tripped_conditional:
lines = [self._make_header('Conditional'),
'TEST= {}'.format(self.tripped_conditional.teststr),
'CTX= {}'.format(self.tripped_conditional.value_context)]
ret = self._make_lines(lines)
return ret
def _make_header(self, h):
return '============================= {} {}'.format(h, '=' * (30 - len(h)))
def _make_lines(self, lines):
return '{}\n'.format('\n'.join(lines))
class AirResult(AutomatedRunResult):
def _make_summary(self):
s = self._air_ratio()
return s
class UnknownResult(AutomatedRunResult):
def _make_summary(self):
lines = ['AGE= {}'.format(self.isotope_group.age)]
return '\n'.join(lines)
class BlankResult(AutomatedRunResult):
def _make_summary(self):
s = self._air_ratio()
return s
if __name__ == '__main__':
from pychron.core.ui.text_editor import myTextEditor
from pychron.processing.isotope import Isotope
from traitsui.api import View, UItem
ig = IsotopeGroup()
a40 = Isotope('Ar40', 'H1')
a40.set_uvalue((50000.12345, 0.4123412341))
a36 = Isotope('Ar36', 'CDD')
a36.set_uvalue((51230.12345 / 295.5, 0.132142341))
a38 = Isotope('Ar38', 'L1')
a38.set_uvalue((51230.12345 / 1590.5, 0.132142341))
ig.isotopes = dict(Ar40=a40, Ar36=a36, Ar38=a38)
ig.age = 1.143
a = AirResult(runid='1234123-01A',
isotope_group=ig)
a.tripped_conditional = AutomatedRunConditional('age>10')
v = View(UItem('summary', style='custom', editor=myTextEditor(editable=False,
fontsize=14)),
title='Summary',
width=1000,
resizable=True)
a.configure_traits(view=v)
# ============= EOF =============================================
|
webisteme/punkmoney
|
refs/heads/master
|
tracker/utils/config_template.py
|
1
|
#!/usr/bin/python
"""
PunkMoney 0.2 :: Configuration File
"""
# COMMUNITY
'''
Currency hashtag, name of trustlist and seed_user to crawl from (no '@'). Must be lowercase!
Please use a different hashtag for testing.
'''
HASHTAG = '#punkmoney'
ALT_HASHTAG = '#pnmy'
# TWITTER API CREDENTIALS
'''
Register an app with read/write access at http://dev.twitter.com.
'''
TW_CONSUMER_KEY = ''
TW_CONSUMER_SECRET = ''
TW_ACCESS_KEY = ''
TW_ACCESS_SECRET = ''
# LOG PATH
'''
Absolute path to a log file in /tracker/logs
'''
LOG_PATH = ''
# MYSQL DATABASE
'''
MySQL database credentials. Socket locations can vary depending on the system.
'''
MYSQL_HOST = 'localhost'
MYSQL_USER = ''
MYSQL_DATABASE = ''
MYSQL_PASSWORD = ''
MYSQL_SOCKET = '/tmp/mysql.sock'
# SETTINGS
'''
Set tweet to true to tweet syntax errors via the main Twitter account.
Set debug to true to log debug messages
TWIPM tweets a weekly summary of activity in the tracker.
'''
SETTINGS = {
'tweet' : False,
'debug' : False,
'twipm' : False,
}
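# Hedged usage sketch (assumption about how the tracker consumes this template):
# copy this file to config.py, fill in the credentials, and import the constants,
# e.g.
#
#   from tracker.utils.config import HASHTAG, SETTINGS
#   if SETTINGS['debug']:
#       print('debug logging enabled for %s' % HASHTAG)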
|
yaroslavvb/tensorflow
|
refs/heads/master
|
tensorflow/contrib/image/python/ops/image_ops.py
|
31
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for image_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.util import loader
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
_image_ops = loader.load_op_library(
resource_loader.get_path_to_datafile("_image_ops.so"))
_IMAGE_DTYPES = set(
[dtypes.uint8, dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
ops.RegisterShape("ImageProjectiveTransform")(common_shapes.call_cpp_shape_fn)
def rotate(images, angles):
"""Rotate image(s) by the passed angle(s) in radians.
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW).
angles: A scalar angle to rotate all images by, or (if images has rank 4)
a vector of length num_images, with an angle for each image in the batch.
Returns:
Image(s) with the same type and shape as `images`, rotated by the given
angle(s). Empty space due to the rotation will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
image_or_images = ops.convert_to_tensor(images, name="images")
angle_or_angles = ops.convert_to_tensor(
angles, name="angles", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if len(angle_or_angles.get_shape()) == 0: # pylint: disable=g-explicit-length-test
angles = angle_or_angles[None]
elif len(angle_or_angles.get_shape()) == 1:
angles = angle_or_angles
else:
raise TypeError("Angles should have rank 0 or 1.")
image_width = math_ops.cast(array_ops.shape(images)[2], dtypes.float32)[None]
image_height = math_ops.cast(array_ops.shape(images)[1], dtypes.float32)[None]
x_offset = ((image_width - 1) - (math_ops.cos(angles) *
(image_width - 1) - math_ops.sin(angles) *
(image_height - 1))) / 2.0
y_offset = ((image_height - 1) - (math_ops.sin(angles) *
(image_width - 1) + math_ops.cos(angles) *
(image_height - 1))) / 2.0
num_angles = array_ops.shape(angles)[0]
transforms = array_ops.concat(
values=[
math_ops.cos(angles)[:, None],
-math_ops.sin(angles)[:, None],
x_offset[:, None],
math_ops.sin(angles)[:, None],
math_ops.cos(angles)[:, None],
y_offset[:, None],
array_ops.zeros((num_angles, 2), dtypes.float32),
],
axis=1)
# pylint: disable=protected-access
output = transform(images, transforms)
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
def transform(images, transforms):
"""Applies the given transform(s) to the image(s).
Args:
images: A tensor of shape (num_images, num_rows, num_columns, num_channels)
(NHWC), (num_rows, num_columns, num_channels) (HWC), or
(num_rows, num_columns) (HW).
transforms: Projective transform matrix/matrices. A vector of length 8 or
tensor of size N x 8. If one row of transforms is
[a0, a1, a2, b0, b1, b2, c0, c1], then it maps the *output* point
`(x, y)` to a transformed *input* point
`(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
where `k = c0 x + c1 y + 1`.
Returns:
Image(s) with the same type and shape as `images`, with the given
transform(s) applied. Transformed coordinates outside of the input image
will be filled with zeros.
Raises:
TypeError: If `image` is an invalid type.
"""
image_or_images = ops.convert_to_tensor(images, name="images")
transform_or_transforms = ops.convert_to_tensor(
transforms, name="transforms", dtype=dtypes.float32)
if image_or_images.dtype.base_dtype not in _IMAGE_DTYPES:
raise TypeError("Invalid dtype %s." % image_or_images.dtype)
if len(image_or_images.get_shape()) == 2:
images = image_or_images[None, :, :, None]
elif len(image_or_images.get_shape()) == 3:
images = image_or_images[None, :, :, :]
elif len(image_or_images.get_shape()) == 4:
images = image_or_images
else:
raise TypeError("Images should have rank between 2 and 4.")
if len(transform_or_transforms.get_shape()) == 1:
transforms = transform_or_transforms[None]
elif len(transform_or_transforms.get_shape()) == 2:
transforms = transform_or_transforms
else:
raise TypeError("Transforms should have rank 1 or 2.")
# pylint: disable=protected-access
output = _image_ops.image_projective_transform(images, transforms)
if len(image_or_images.get_shape()) == 2:
return output[0, :, :, 0]
elif len(image_or_images.get_shape()) == 3:
return output[0, :, :, :]
else:
return output
ops.NotDifferentiable("ImageProjectiveTransform")
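# Hedged usage sketch (assumes a TF 1.x graph/session; the array values are made
# up): rotate() accepts HW, HWC or NHWC tensors, and transform() takes the
# 8-element projective transforms documented above.
#
#   import numpy as np
#   import tensorflow as tf
#   from tensorflow.contrib.image import rotate, transform
#
#   image = np.zeros((64, 64, 3), np.float32)
#   rotated = rotate(image, np.pi / 4.0)                    # single HWC image
#   identity = transform(image, [1, 0, 0, 0, 1, 0, 0, 0])   # identity transform
#   with tf.Session() as sess:
#       out = sess.run([rotated, identity])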
|
remb0/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/plugins/manage/main.py
|
2
|
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent, fireEventAsync
from couchpotato.core.helpers.request import jsonified, getParams
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
import os
import time
log = CPLog(__name__)
class Manage(Plugin):
def __init__(self):
fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2)
addEvent('manage.update', self.updateLibrary)
addApiView('manage.update', self.updateLibraryView, docs = {
'desc': 'Update the library by scanning for new movies',
'params': {
'full': {'desc': 'Do a full update or just recently changed/added movies.'},
}
})
if not Env.get('dev'):
addEvent('app.load', self.updateLibrary)
def updateLibraryView(self):
params = getParams()
fireEventAsync('manage.update', full = params.get('full', True))
return jsonified({
'success': True
})
def updateLibrary(self, full = True):
last_update = float(Env.prop('manage.last_update', default = 0))
if self.isDisabled() or (last_update > time.time() - 20):
return
directories = self.directories()
added_identifiers = []
for directory in directories:
if not os.path.isdir(directory):
if len(directory) > 0:
log.error('Directory doesn\'t exist: %s' % directory)
continue
log.info('Updating manage library: %s' % directory)
identifiers = fireEvent('scanner.folder', folder = directory, newer_than = last_update, single = True)
if identifiers:
added_identifiers.extend(identifiers)
# Break if CP wants to shut down
if self.shuttingDown():
break
# If cleanup option is enabled, remove offline files from database
if self.conf('cleanup') and full and not self.shuttingDown():
# Get movies with done status
done_movies = fireEvent('movie.list', status = 'done', single = True)
for done_movie in done_movies:
if done_movie['library']['identifier'] not in added_identifiers:
fireEvent('movie.delete', movie_id = done_movie['id'])
Env.prop('manage.last_update', time.time())
def directories(self):
try:
return [x.strip() for x in self.conf('library', default = '').split('::')]
except:
return []
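# Illustrative note (assumption about the stored setting, not part of the plugin):
# the 'library' option is persisted as a single '::'-separated string, so
# directories() turns a value like
#   '/mnt/movies::/mnt/films'
# into
#   ['/mnt/movies', '/mnt/films']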
|
40223231/Final-Exam-with-classmate
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/locale.py
|
624
|
def getdefaultlocale():
return __BRYTHON__.language,None
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
return None, None
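# Minimal usage sketch of this stub (mirrors the subset of the CPython locale API
# it emulates; any value other than None, '' or 'C' makes setlocale raise Error):
#
#   import locale
#   locale.setlocale(locale.LC_ALL, 'C')     # -> 'C'
#   conv = locale.localeconv()
#   conv['decimal_point']                    # -> '.'
#   locale.getlocale()                       # -> (None, None)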
|
nmearl/pyqtgraph
|
refs/heads/develop
|
examples/VideoTemplate_pyside.py
|
24
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './examples/VideoTemplate.ui'
#
# Created: Mon Feb 17 20:39:30 2014
# by: pyside-uic 0.2.14 running on PySide 1.1.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(695, 798)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QtGui.QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.downsampleCheck = QtGui.QCheckBox(self.centralwidget)
self.downsampleCheck.setObjectName("downsampleCheck")
self.gridLayout_2.addWidget(self.downsampleCheck, 8, 0, 1, 2)
self.scaleCheck = QtGui.QCheckBox(self.centralwidget)
self.scaleCheck.setObjectName("scaleCheck")
self.gridLayout_2.addWidget(self.scaleCheck, 4, 0, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.rawRadio = QtGui.QRadioButton(self.centralwidget)
self.rawRadio.setObjectName("rawRadio")
self.gridLayout.addWidget(self.rawRadio, 3, 0, 1, 1)
self.gfxRadio = QtGui.QRadioButton(self.centralwidget)
self.gfxRadio.setChecked(True)
self.gfxRadio.setObjectName("gfxRadio")
self.gridLayout.addWidget(self.gfxRadio, 2, 0, 1, 1)
self.stack = QtGui.QStackedWidget(self.centralwidget)
self.stack.setObjectName("stack")
self.page = QtGui.QWidget()
self.page.setObjectName("page")
self.gridLayout_3 = QtGui.QGridLayout(self.page)
self.gridLayout_3.setObjectName("gridLayout_3")
self.graphicsView = GraphicsView(self.page)
self.graphicsView.setObjectName("graphicsView")
self.gridLayout_3.addWidget(self.graphicsView, 0, 0, 1, 1)
self.stack.addWidget(self.page)
self.page_2 = QtGui.QWidget()
self.page_2.setObjectName("page_2")
self.gridLayout_4 = QtGui.QGridLayout(self.page_2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.rawImg = RawImageWidget(self.page_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.rawImg.sizePolicy().hasHeightForWidth())
self.rawImg.setSizePolicy(sizePolicy)
self.rawImg.setObjectName("rawImg")
self.gridLayout_4.addWidget(self.rawImg, 0, 0, 1, 1)
self.stack.addWidget(self.page_2)
self.page_3 = QtGui.QWidget()
self.page_3.setObjectName("page_3")
self.gridLayout_5 = QtGui.QGridLayout(self.page_3)
self.gridLayout_5.setObjectName("gridLayout_5")
self.rawGLImg = RawImageGLWidget(self.page_3)
self.rawGLImg.setObjectName("rawGLImg")
self.gridLayout_5.addWidget(self.rawGLImg, 0, 0, 1, 1)
self.stack.addWidget(self.page_3)
self.gridLayout.addWidget(self.stack, 0, 0, 1, 1)
self.rawGLRadio = QtGui.QRadioButton(self.centralwidget)
self.rawGLRadio.setObjectName("rawGLRadio")
self.gridLayout.addWidget(self.rawGLRadio, 4, 0, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 4)
self.dtypeCombo = QtGui.QComboBox(self.centralwidget)
self.dtypeCombo.setObjectName("dtypeCombo")
self.dtypeCombo.addItem("")
self.dtypeCombo.addItem("")
self.dtypeCombo.addItem("")
self.gridLayout_2.addWidget(self.dtypeCombo, 3, 2, 1, 1)
self.label = QtGui.QLabel(self.centralwidget)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 3, 0, 1, 1)
self.rgbLevelsCheck = QtGui.QCheckBox(self.centralwidget)
self.rgbLevelsCheck.setObjectName("rgbLevelsCheck")
self.gridLayout_2.addWidget(self.rgbLevelsCheck, 4, 1, 1, 1)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.minSpin2 = SpinBox(self.centralwidget)
self.minSpin2.setEnabled(False)
self.minSpin2.setObjectName("minSpin2")
self.horizontalLayout_2.addWidget(self.minSpin2)
self.label_3 = QtGui.QLabel(self.centralwidget)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.horizontalLayout_2.addWidget(self.label_3)
self.maxSpin2 = SpinBox(self.centralwidget)
self.maxSpin2.setEnabled(False)
self.maxSpin2.setObjectName("maxSpin2")
self.horizontalLayout_2.addWidget(self.maxSpin2)
self.gridLayout_2.addLayout(self.horizontalLayout_2, 5, 2, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.minSpin1 = SpinBox(self.centralwidget)
self.minSpin1.setObjectName("minSpin1")
self.horizontalLayout.addWidget(self.minSpin1)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.horizontalLayout.addWidget(self.label_2)
self.maxSpin1 = SpinBox(self.centralwidget)
self.maxSpin1.setObjectName("maxSpin1")
self.horizontalLayout.addWidget(self.maxSpin1)
self.gridLayout_2.addLayout(self.horizontalLayout, 4, 2, 1, 1)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.minSpin3 = SpinBox(self.centralwidget)
self.minSpin3.setEnabled(False)
self.minSpin3.setObjectName("minSpin3")
self.horizontalLayout_3.addWidget(self.minSpin3)
self.label_4 = QtGui.QLabel(self.centralwidget)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.horizontalLayout_3.addWidget(self.label_4)
self.maxSpin3 = SpinBox(self.centralwidget)
self.maxSpin3.setEnabled(False)
self.maxSpin3.setObjectName("maxSpin3")
self.horizontalLayout_3.addWidget(self.maxSpin3)
self.gridLayout_2.addLayout(self.horizontalLayout_3, 6, 2, 1, 1)
self.lutCheck = QtGui.QCheckBox(self.centralwidget)
self.lutCheck.setObjectName("lutCheck")
self.gridLayout_2.addWidget(self.lutCheck, 7, 0, 1, 1)
self.alphaCheck = QtGui.QCheckBox(self.centralwidget)
self.alphaCheck.setObjectName("alphaCheck")
self.gridLayout_2.addWidget(self.alphaCheck, 7, 1, 1, 1)
self.gradient = GradientWidget(self.centralwidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.gradient.sizePolicy().hasHeightForWidth())
self.gradient.setSizePolicy(sizePolicy)
self.gradient.setObjectName("gradient")
self.gridLayout_2.addWidget(self.gradient, 7, 2, 1, 2)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 3, 3, 1, 1)
self.fpsLabel = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setPointSize(12)
self.fpsLabel.setFont(font)
self.fpsLabel.setAlignment(QtCore.Qt.AlignCenter)
self.fpsLabel.setObjectName("fpsLabel")
self.gridLayout_2.addWidget(self.fpsLabel, 0, 0, 1, 4)
self.rgbCheck = QtGui.QCheckBox(self.centralwidget)
self.rgbCheck.setObjectName("rgbCheck")
self.gridLayout_2.addWidget(self.rgbCheck, 3, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.centralwidget)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
self.framesSpin = QtGui.QSpinBox(self.centralwidget)
self.framesSpin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.framesSpin.setProperty("value", 10)
self.framesSpin.setObjectName("framesSpin")
self.horizontalLayout_4.addWidget(self.framesSpin)
self.widthSpin = QtGui.QSpinBox(self.centralwidget)
self.widthSpin.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
self.widthSpin.setMaximum(10000)
self.widthSpin.setProperty("value", 512)
self.widthSpin.setObjectName("widthSpin")
self.horizontalLayout_4.addWidget(self.widthSpin)
self.heightSpin = QtGui.QSpinBox(self.centralwidget)
self.heightSpin.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.heightSpin.setMaximum(10000)
self.heightSpin.setProperty("value", 512)
self.heightSpin.setObjectName("heightSpin")
self.horizontalLayout_4.addWidget(self.heightSpin)
self.gridLayout_2.addLayout(self.horizontalLayout_4, 2, 1, 1, 2)
self.sizeLabel = QtGui.QLabel(self.centralwidget)
self.sizeLabel.setText("")
self.sizeLabel.setObjectName("sizeLabel")
self.gridLayout_2.addWidget(self.sizeLabel, 2, 3, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.stack.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
self.downsampleCheck.setText(QtGui.QApplication.translate("MainWindow", "Auto downsample", None, QtGui.QApplication.UnicodeUTF8))
self.scaleCheck.setText(QtGui.QApplication.translate("MainWindow", "Scale Data", None, QtGui.QApplication.UnicodeUTF8))
self.rawRadio.setText(QtGui.QApplication.translate("MainWindow", "RawImageWidget", None, QtGui.QApplication.UnicodeUTF8))
self.gfxRadio.setText(QtGui.QApplication.translate("MainWindow", "GraphicsView + ImageItem", None, QtGui.QApplication.UnicodeUTF8))
self.rawGLRadio.setText(QtGui.QApplication.translate("MainWindow", "RawGLImageWidget", None, QtGui.QApplication.UnicodeUTF8))
self.dtypeCombo.setItemText(0, QtGui.QApplication.translate("MainWindow", "uint8", None, QtGui.QApplication.UnicodeUTF8))
self.dtypeCombo.setItemText(1, QtGui.QApplication.translate("MainWindow", "uint16", None, QtGui.QApplication.UnicodeUTF8))
self.dtypeCombo.setItemText(2, QtGui.QApplication.translate("MainWindow", "float", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Data type", None, QtGui.QApplication.UnicodeUTF8))
self.rgbLevelsCheck.setText(QtGui.QApplication.translate("MainWindow", "RGB", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("MainWindow", "<--->", None, QtGui.QApplication.UnicodeUTF8))
self.lutCheck.setText(QtGui.QApplication.translate("MainWindow", "Use Lookup Table", None, QtGui.QApplication.UnicodeUTF8))
self.alphaCheck.setText(QtGui.QApplication.translate("MainWindow", "alpha", None, QtGui.QApplication.UnicodeUTF8))
self.fpsLabel.setText(QtGui.QApplication.translate("MainWindow", "FPS", None, QtGui.QApplication.UnicodeUTF8))
self.rgbCheck.setText(QtGui.QApplication.translate("MainWindow", "RGB", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("MainWindow", "Image size", None, QtGui.QApplication.UnicodeUTF8))
from pyqtgraph.widgets.RawImageWidget import RawImageGLWidget, RawImageWidget
from pyqtgraph import GradientWidget, SpinBox, GraphicsView
|
krisys/django
|
refs/heads/master
|
django/conf/locale/zh_Hans/formats.py
|
1008
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
TIME_FORMAT = 'H:i' # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月' # 2016年9月
MONTH_DAY_FORMAT = 'm月j日' # 9月5日
SHORT_DATE_FORMAT = 'Y年n月j日' # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i' # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1 # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%Y/%m/%d', # '2016/09/05'
'%Y-%m-%d', # '2016-09-05'
'%Y年%n月%j日', # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
'%H:%M', # '20:45'
'%H:%M:%S', # '20:45:29'
'%H:%M:%S.%f', # '20:45:29.000200'
]
DATETIME_INPUT_FORMATS = [
'%Y/%m/%d %H:%M', # '2016/09/05 20:45'
'%Y-%m-%d %H:%M', # '2016-09-05 20:45'
'%Y年%n月%j日 %H:%M', # '2016年9月5日 14:45'
'%Y/%m/%d %H:%M:%S', # '2016/09/05 20:45:29'
'%Y-%m-%d %H:%M:%S', # '2016-09-05 20:45:29'
'%Y年%n月%j日 %H:%M:%S', # '2016年9月5日 20:45:29'
'%Y/%m/%d %H:%M:%S.%f', # '2016/09/05 20:45:29.000200'
'%Y-%m-%d %H:%M:%S.%f', # '2016-09-05 20:45:29.000200'
    '%Y年%n月%j日 %H:%M:%S.%f',  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''
NUMBER_GROUPING = 4
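# Hedged usage sketch (assumes a configured Django project with USE_L10N = True
# and LANGUAGE_CODE = 'zh-hans'); Django's format utilities then pick up the
# strings defined above:
#
#   import datetime
#   from django.utils import formats
#   formats.date_format(datetime.date(2016, 9, 5))            # '2016年9月5日'
#   formats.date_format(datetime.datetime(2016, 9, 5, 20, 45),
#                       'DATETIME_FORMAT')                     # '2016年9月5日 20:45'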
|
zerodark/httpie
|
refs/heads/master
|
httpie/client.py
|
16
|
import json
import sys
from pprint import pformat
import requests
from requests.packages import urllib3
from httpie import sessions
from httpie import __version__
from httpie.compat import str
from httpie.plugins import plugin_manager
# https://urllib3.readthedocs.org/en/latest/security.html
urllib3.disable_warnings()
FORM = 'application/x-www-form-urlencoded; charset=utf-8'
JSON = 'application/json'
DEFAULT_UA = 'HTTPie/%s' % __version__
def get_requests_session():
requests_session = requests.Session()
    for cls in plugin_manager.get_transport_plugins():
transport_plugin = cls()
requests_session.mount(prefix=transport_plugin.prefix,
adapter=transport_plugin.get_adapter())
return requests_session
def get_response(args, config_dir):
"""Send the request and return a `request.Response`."""
requests_session = get_requests_session()
if not args.session and not args.session_read_only:
kwargs = get_requests_kwargs(args)
if args.debug:
dump_request(kwargs)
response = requests_session.request(**kwargs)
else:
response = sessions.get_response(
requests_session=requests_session,
args=args,
config_dir=config_dir,
session_name=args.session or args.session_read_only,
read_only=bool(args.session_read_only),
)
return response
def dump_request(kwargs):
sys.stderr.write('\n>>> requests.request(**%s)\n\n'
% pformat(kwargs))
def encode_headers(headers):
# This allows for unicode headers which is non-standard but practical.
# See: https://github.com/jkbrzt/httpie/issues/212
return dict(
(name, value.encode('utf8') if isinstance(value, str) else value)
for name, value in headers.items()
)
def get_default_headers(args):
default_headers = {
'User-Agent': DEFAULT_UA
}
auto_json = args.data and not args.form
# FIXME: Accept is set to JSON with `http url @./file.txt`.
if args.json or auto_json:
default_headers['Accept'] = 'application/json'
if args.json or (auto_json and args.data):
default_headers['Content-Type'] = JSON
elif args.form and not args.files:
# If sending files, `requests` will set
# the `Content-Type` for us.
default_headers['Content-Type'] = FORM
return default_headers
def get_requests_kwargs(args, base_headers=None):
"""
Translate our `args` into `requests.request` keyword arguments.
"""
# Serialize JSON data, if needed.
data = args.data
auto_json = data and not args.form
if (args.json or auto_json) and isinstance(data, dict):
if data:
data = json.dumps(data)
else:
# We need to set data to an empty string to prevent requests
# from assigning an empty list to `response.request.data`.
data = ''
# Finalize headers.
headers = get_default_headers(args)
if base_headers:
headers.update(base_headers)
headers.update(args.headers)
headers = encode_headers(headers)
credentials = None
if args.auth:
auth_plugin = plugin_manager.get_auth_plugin(args.auth_type)()
credentials = auth_plugin.get_auth(args.auth.key, args.auth.value)
cert = None
if args.cert:
cert = args.cert
if args.cert_key:
cert = cert, args.cert_key
kwargs = {
'stream': True,
'method': args.method.lower(),
'url': args.url,
'headers': headers,
'data': data,
'verify': {
'yes': True,
'no': False
}.get(args.verify, args.verify),
'cert': cert,
'timeout': args.timeout,
'auth': credentials,
'proxies': dict((p.key, p.value) for p in args.proxy),
'files': args.files,
'allow_redirects': args.follow,
'params': args.params,
}
return kwargs
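# --- Illustrative sketch (not part of upstream HTTPie) ---
# Demonstrates how encode_headers() byte-encodes text header values before
# they are handed to `requests`; the header name and value are made up.
if __name__ == '__main__':
    demo = encode_headers({'X-Greeting': u'caf\u00e9'})
    print(demo)  # the text value comes back UTF-8 encoded as bytes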
|
myusuf/google_app
|
refs/heads/master
|
Lesson_5/00_Conference_Central/models.py
|
34
|
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) #DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) #DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
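# --- Illustrative sketch (not part of the original lesson code) ---
# The ProtoRPC messages above are plain value objects, so they can be built
# and inspected directly; the field values below are made up.
if __name__ == '__main__':
    form = ProfileForm(displayName='Ada', mainEmail='ada@example.com',
                       teeShirtSize=TeeShirtSize.M_W)
    assert form.displayName == 'Ada'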
|
opennode/waldur-mastermind
|
refs/heads/develop
|
src/waldur_core/structure/migrations/0021_project_backend_id.py
|
1
|
# Generated by Django 2.2.13 on 2021-03-08 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0020_drop_servicecertification_model'),
]
operations = [
migrations.AddField(
model_name='project',
name='backend_id',
field=models.CharField(blank=True, max_length=255),
),
]
|
akoerner/HCC-Swanson
|
refs/heads/master
|
Tools/HCCPlot.py
|
1
|
# copyright 2013 UNL Holland Computing Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from time import clock, time
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.font_manager import FontProperties
from matplotlib.path import Path
from matplotlib.collections import PatchCollection
from matplotlib.collections import PathCollection
import numpy
def transformByteToMB(data):
	'''Transform the data obtained by parsing the ROOT file into the
	form expected by the plotting functions in this module.'''
scale = pow(2,20)
newData = []
for i in data:
mb = i[0] / scale
offset = i[0] % scale
end = offset + i[1]
name = i[3]
basket = (mb,offset,end,name)
		#handle baskets that run past the end of this megabyte and rescale them
if end > scale:
temp = []
length = end
basket = (mb,offset, scale,name)
temp.append(basket)
length = length - offset
while length > 0:
mb = mb+1
loc = 0
end = 0
if(length < scale):
end = length
else:
end = scale
length = length - scale
temp.append([mb, loc,end, name])
newData += temp
else:
newData.append(basket)
return newData
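# Worked example (illustration only, values made up): a basket stored at byte
# offset 1 MB + 512 with length 256 lies entirely inside megabyte 1, so it is
# emitted unchanged as a (MB index, start offset, end offset, branch name) tuple:
#   transformByteToMB([(pow(2, 20) + 512, 256, None, 'branchA')])
#   -> [(1, 512, 768, 'branchA')]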
def getColor(branch ,colorMap):
'''Get the color from the color map'''
if branch in colorMap:
return colorMap[branch]
else:
return 'black'
def plotFileLayout(data, display, outName, colorMap, legendBranches,limits = None):
	'''Plot the file layout data given a list of megabyte entries produced by
	transformByteToMB in this file.'''
#keep track of maximum x and y for bounds
maxy = 0
maxx = 10
height = 1024
arts =[]
labels = []
#keep track of branch and color
curBranch = ''
#Plot the data
branchPaths = []
	#movement codes are the same for every rectangle
codes =[Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
#get axis
ax = plt.gca()
for point in data:
		#get the color; if it is a different branch, add the previous one to the collection
if point[3] != curBranch:
if len(branchPaths) != 0:
color = getColor(curBranch, colorMap)
#stuff for the legend
if legendBranches != None and curBranch in legendBranches:
arts.append(mpatches.Rectangle((0,0),1,1,fc=color))
labels.append(curBranch)
				#add the accumulated paths to the collection and start a new branch
ax.add_collection(PathCollection(branchPaths,facecolor=color,edgecolor=color, linewidth=.0))
curBranch = point[3]
branchPaths = []
#take care of color map stuff
#get x and y
y = point[0] * height
x = point[1]
w = point[2] - point[1]
verts = [ (x,y),
(x,y+height),
(x+w,y+height),
(x+w,y),
(x,y),
]
branchPaths.append(Path(verts, codes))
#update maximums
if maxy < y + height:
maxy = y+height
if (x+w) > maxx:
maxx = (x+w)
#must do one last update of the color
if len(branchPaths) != 0:
color = getColor(curBranch, colorMap)
#stuff for the legend
if legendBranches != None and curBranch in legendBranches:
arts.append(mpatches.Rectangle((0,0),1,1,fc=color))
labels.append(curBranch)
			#add the remaining paths to the collection
ax.add_collection(PathCollection(branchPaths,facecolor=color,edgecolor=color, linewidth=.0))
path = PathCollection(branchPaths,facecolor=color,edgecolor=color)
#add the collection to the graphic
ax.add_collection(path)
if(limits == None):
plt.xlim((0,maxx))
plt.ylim((0,maxy))
else:
plt.xlim((0,maxx))
plt.ylim(limits)
	#set up axis ticks etc
plt.xlabel('Offset within Mb (kb)')
plt.ylabel('Mb Offset in File')
plt.title('File Layout Graph')
#fix the labels
plt.draw()
#fix the y labels
numTicks = maxy / 1024
if numTicks > 10:
numTicks = 10
spacing = int((maxy)/numTicks)
locs = [y for y in range(0, maxy,spacing)]
labs = [str(y/1024) for y in locs]
plt.yticks(locs,labs)
locs = [256, 512, 789, 1024]
locs = [x * 1024 for x in locs]
labs = [str(x/1024) for x in locs]
plt.xticks(locs,labs)
#display or save it
if(display):
plt.show()
else:
plt.savefig(outName)
if legendBranches != None:
fp = FontProperties()
fp.set_size('small')
fig = plt.figure()
fig.legend(arts, labels, loc='upper left', mode='expand', prop = fp)
plt.savefig(outName[:-4] + '_legend' + '.png')
def plotUsage(data, name='File Usage' ,outname = 'filestuff', limits = None, points=None, subtitle = None):
	'''Plot the file usage data given a list of MB entries transformed by the
	method in this file.'''
plt.figure()
newData = []
row = 0
for i in data:
if row % 2 == 0:
newData.append(i)
row = row + 1
data = numpy.array(newData)
# define the colormap
clrMap = plt.cm.jet
# extract all colors from the .jet map
cmaplist = [clrMap(i) for i in xrange(clrMap.N)]
# force the first color entry to be grey
cmaplist[0] = (1.0,1.0,1.0,1.0)
# create the new map
clrMap = clrMap.from_list('custommap', cmaplist, clrMap.N)
cm.register_cmap(name='custommap', cmap=clrMap)
#get axis
maxy = len(data)
	maxx = pow(2, 10)  # 1024 kB columns per megabyte row
plt.pcolormesh(data,vmin = 1, cmap='custommap')#, cmap = mcolor.colormap('gist_ncar'))
plt.xlabel('Offset within Mb (kb)')
plt.ylabel('Mb Offset in File')
if len(name) > 100:
plt.title(name, fontsize=8)
else:
plt.title(name, fontsize=10)
plt.colorbar()
if points != None:
px = [i[0] for i in points]
py = [i[1]/2 for i in points]
plt.scatter(px,py, marker=(5,1), c='goldenrod')
#fix the y labels
spacing = int(maxy/10)
locs = [y for y in xrange(0, maxy,spacing)]
labs = [str(y) for y in locs]
plt.yticks(locs,labs)
plt.xlim(0,maxx)
plt.ylim(0,maxy)
locs = [256, 512, 789, 1024]
labs = [str(x) for x in locs]
plt.xticks(locs,labs)
plt.savefig(outname+ '.png')
def plotFileLayoutTool2(data, display, outName, colorMap, legendBranches,limits = None, title = 'File Layout Graph' , fileUsage = None):
	'''Plot the file layout data given a list of megabyte entries produced by
	transformByteToMB in this file.'''
plt.figure()
#keep track of maximum x and y for bounds
stime = time()
maxy = 0
maxx = 10
height = 2
halfway = height
if fileUsage != None:
halfway = height * 1/2.
arts =[]
labels = []
if(fileUsage != None):
# define the colormap
cmap = plt.cm.jet
cmap.set_bad('white')
#get min and max
mini = fileUsage.min()
maxi = fileUsage.max()
plt.pcolormesh(fileUsage,vmin = mini,vmax = maxi, cmap=cmap)
plt.colorbar()
len1 = len(fileUsage)
len2 = len(fileUsage[0])
#clean it up
del fileUsage
		#make the background black (somewhat hackish: color every other row black)
arr = numpy.zeros((len1,len2) )
for i in xrange(1,len1,2):
for j in xrange(len2):
arr[i][j] = 1
cmap =LinearSegmentedColormap.from_list('map1', ['black', 'black'])
plt.pcolormesh(numpy.ma.masked_equal(arr,0), cmap=cmap)
del arr
#keep track of branch and color
curBranch = ''
#Plot the data
branchPaths = []
	#movement codes are the same for every rectangle
codes =[Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
#get axis
ax = plt.gca()
	#guard: ensure `color` is defined even when there is no data to plot
if len(data) == 0:
color = 'black'
for point in data:
		#get the color; if it is a different branch, add the previous one to the collection
if point[3] != curBranch:
if len(branchPaths) != 0:
color = getColor(curBranch, colorMap)
#stuff for the legend
if legendBranches != None and curBranch in legendBranches:
arts.append(mpatches.Rectangle((0,0),1,1,fc=color))
labels.append(curBranch)
				#add the accumulated paths to the collection and start a new branch
ax.add_collection(PathCollection(branchPaths,facecolor=color,edgecolor=color, linewidth=.0))
curBranch = point[3]
branchPaths = []
#take care of color map stuff
#get x and y
y = point[0] * height
x = point[1] / 1024
w = point[2] /1024- point[1] / 1024
verts = [ (x,y+halfway),
(x,y+height),
(x+w,y+height),
(x+w,y+halfway),
(x,y+halfway),
]
branchPaths.append(Path(verts, codes))
#update maximums
if maxy < y + height:
maxy = y+height
if (x+w) > maxx:
maxx = (x+w)
#must do one last update of the color
if len(branchPaths) != 0:
color = getColor(curBranch, colorMap)
#stuff for the legend
if legendBranches != None and curBranch in legendBranches:
arts.append(mpatches.Rectangle((0,0),1,1,fc=color))
labels.append(curBranch)
			#add the remaining paths to the collection
ax.add_collection(PathCollection(branchPaths,facecolor=color,edgecolor=color, linewidth=.0))
path = PathCollection(branchPaths,facecolor=color,edgecolor=color)
#add the collection to the graphic
ax.add_collection(path)
starty = 0
if(limits == None):
plt.xlim((0,maxx))
plt.ylim((0,maxy))
else:
plt.xlim((0,maxx))
plt.ylim((limits[0] , limits[1]) )
starty = limits[0]
maxy = limits[1]
	#set up axis ticks etc
plt.xlabel('Offset within Mb (kb)')
plt.ylabel('Mb Offset in File')
if len(title) > 100:
plt.title(title, fontsize=8)
else:
plt.title(title, fontsize=10)
#fix the labels
plt.draw()
#fix the y labels
numTicks = maxy - starty
if numTicks > 10:
numTicks = 10
spacing = int((maxy-starty)/numTicks)
if(spacing > 0):
locs = [y for y in xrange(starty, maxy,spacing)]
labs = [str(y/2) for y in locs]
plt.yticks(locs,labs)
locs = [256, 512, 789, 1024]
locs = [x for x in locs]
labs = [str(x) for x in locs]
plt.xticks(locs,labs)
#display or save it
if(display):
plt.show()
else:
plt.savefig(outName)
if legendBranches != None:
fp = FontProperties()
fp.set_size('small')
fig = plt.figure()
fig.legend(arts, labels, loc='upper left', mode='expand', prop = fp)
plt.savefig(outName[:-4] + '_legend' + '.png')
etime = time()
def getName(file):
	'''get the output image name from a file name:
	the extension is removed and .png appended'''
idx = file.rfind('.')
return file[:idx] + '.png'
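# --- Illustrative driver (not part of the original tool) ---
# One plausible way to wire the helpers above together; the byte offsets,
# branch name, colour and output file name are all made up for demonstration.
if __name__ == '__main__':
	raw = [(0, 4096, None, 'Events'), (8192, 2048, None, 'Events')]
	layout = transformByteToMB(raw)
	plotFileLayout(layout, False, getName('demo.root'), {'Events': 'steelblue'}, None)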
|
micropython/micropython-esp32
|
refs/heads/esp32
|
tests/cpydiff/types_bytes_subscrstep.py
|
24
|
"""
categories: Types,bytes
description: Bytes subscription with step != 1 not implemented
cause: MicroPython is highly optimized for memory usage.
workaround: Use explicit loop for this very rare operation.
"""
print(b'123'[0:3:2])
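# Workaround sketch (illustration only, left as a comment so the printed
# output compared by the cpydiff harness is unchanged): select the stepped
# bytes with an explicit loop instead of a slice with step != 1, e.g.
#   bytes(b'123'[i] for i in range(0, 3, 2))   # -> b'13'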
|
josthkko/ggrc-core
|
refs/heads/develop
|
test/unit/ggrc/models/hooks/__init__.py
|
256
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
|
shiora/The-Perfect-Pokemon-Team-Balancer
|
refs/heads/master
|
libs/env/Lib/site-packages/whoosh/util/text.py
|
96
|
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import codecs, re
from whoosh.compat import string_type, u, byte
# Note: these functions return a tuple of (text, length), so when you call
# them, you have to add [0] on the end, e.g. str = utf8encode(unicode)[0]
utf8encode = codecs.getencoder("utf-8")
utf8decode = codecs.getdecoder("utf-8")
# Prefix encoding functions
def first_diff(a, b):
"""
Returns the position of the first differing character in the sequences a
and b. For example, first_diff('render', 'rending') == 4. This function
limits the return value to 255 so the difference can be encoded in a single
byte.
"""
i = 0
while i <= 255 and i < len(a) and i < len(b) and a[i] == b[i]:
i += 1
return i
def prefix_encode(a, b):
"""
Compresses bytestring b as a byte representing the prefix it shares with a,
followed by the suffix bytes.
"""
i = first_diff(a, b)
return byte(i) + b[i:]
def prefix_encode_all(ls):
"""Compresses the given list of (unicode) strings by storing each string
(except the first one) as an integer (encoded in a byte) representing
the prefix it shares with its predecessor, followed by the suffix encoded
as UTF-8.
"""
last = u('')
for w in ls:
i = first_diff(last, w)
yield chr(i) + w[i:].encode("utf-8")
last = w
def prefix_decode_all(ls):
"""Decompresses a list of strings compressed by prefix_encode().
"""
last = u('')
for w in ls:
i = ord(w[0])
decoded = last[:i] + w[1:].decode("utf-8")
yield decoded
last = decoded
# Natural key sorting function
_nkre = re.compile(r"\D+|\d+", re.UNICODE)
def _nkconv(i):
try:
return int(i)
except ValueError:
return i.lower()
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally" (i.e.,
``name5`` will come before ``name10`` and ``1`` will come before ``A``).
This function is designed to be used as the ``key`` argument to sorting
functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
# Regular expression functions
def rcompile(pattern, flags=0, verbose=False):
"""A wrapper for re.compile that checks whether "pattern" is a regex object
or a string to be compiled, and automatically adds the re.UNICODE flag.
"""
if not isinstance(pattern, string_type):
# If it's not a string, assume it's already a compiled pattern
return pattern
if verbose:
flags |= re.VERBOSE
return re.compile(pattern, re.UNICODE | flags)
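# --- Illustrative sketch (not part of upstream Whoosh) ---
# Shows the effect of natural_key() as a sort key: digit runs compare
# numerically instead of lexicographically. The sample names are made up.
if __name__ == "__main__":
    names = ["name10", "name5", "name1"]
    print(sorted(names))                   # ['name1', 'name10', 'name5']
    print(sorted(names, key=natural_key))  # ['name1', 'name5', 'name10']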
|
boundarydevices/android_external_chromium_org
|
refs/heads/cm-12.0
|
tools/telemetry/telemetry/unittest/decorators_unittest.py
|
14
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import decorators
class Foo(object):
pass
def CreateFooUncached(_):
return Foo()
@decorators.Cache
def CreateFooCached(_):
return Foo()
class DecoratorsUnitTest(unittest.TestCase):
def testCacheDecorator(self):
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(2))
self.assertNotEquals(CreateFooCached(1), CreateFooCached(2))
self.assertNotEquals(CreateFooUncached(1), CreateFooUncached(1))
self.assertEquals(CreateFooCached(1), CreateFooCached(1))
|
takluyver/tabipy
|
refs/heads/master
|
tests/test_basic.py
|
1
|
import re
from tabipy import Table, TableRow, TableCell, TableHeaderRow, TableHeader
def test_simple():
# For now, this is little more than a smoke test to check that it's not
# erroring out.
t = Table(TableHeaderRow('a','b','c'),
(1, 2, 3),
(2, 4, 6),
)
html = t._repr_html_()
assert '<th' in html
assert '<td' in html
latex = t._repr_latex_()
assert r'\hline' in latex
def test_tableheader():
t = Table((TableHeader('a'), 1, 2),
(TableHeader('b'), 3, 4),
(TableHeader('c'), 5, 6))
html = t._repr_html_()
assert html.count('<th') == 3
latex = t._repr_latex_()
assert latex.count(r'\bf') == 3
def test_escape():
inp_expected = (('', ''),
('&', r'\&'),
('\\', r'{\textbackslash}'),
('~', r'{\textasciitilde}'),
('$', '\$'),
('\n', r'{\linebreak}'),
('\r', r'{\linebreak}'),
('\r\n', r'{\linebreak}'),
('_', r'\_'),
('{', '\{'),
('}', '\}'),
('body & mind & r&d', r'body \& mind \& r\&d'),
(r'\_/~\_/',
r'{\textbackslash}\_/{\textasciitilde}'
r'{\textbackslash}\_/'),
('~_$\r\n{}',
r'{\textasciitilde}\_\${\linebreak}\{\}'))
for inp, expected in inp_expected:
cell = TableCell(inp)
assert cell._repr_latex_() == expected
def default_table():
"Returns the unmodified base table for testing"
t = Table((1,2,3),
(4,5,6),
(7,8,9))
return t
def col_span_table():
"Returns the table used in the col_span tests"
t = default_table()
cell_1_1 = t.rows[0].cells[0]
cell_1_1.col_span = 2
return t
def test_col_span_html():
"This tests that col_span works in html"
t = col_span_table()
#actual_col_span_html(t)
t1_html = t._repr_html_()
row_split = re.compile('<\s*tr\s*>')
lines = row_split.split(t1_html)
assert len(lines)==4
col_split = re.compile('>[\s\d\s]*<')
parts = col_split.split(lines[1])
cl_check = re.compile('colspan\s*=\s*"\s*2\s*"')
assert len(cl_check.findall(parts[0]))>0
def _actual_col_span_html(t):
"For testing at low level modifications and cell method"
t1_html = t._repr_html_()
row_split = re.compile('<\s*tr\s*>')
lines = row_split.split(t1_html)
assert len(lines)==4
col_split = re.compile('>[\s\d\s]*<')
parts = col_split.split(lines[1])
cl_check = re.compile('colspan\s*=\s*"\s*2\s*"')
assert len(cl_check.findall(parts[0]))>0
#print("pass")
def test_col_span_latex():
"This tests that col_span works in latex"
t = col_span_table()
#actual_col_span_latex(t)
t1_latex = t._repr_latex_()
row_split = re.compile(r'\\\\')
lines = row_split.split(t1_latex)
assert len(lines)==4
col_split = re.compile('&')
parts = col_split.split(lines[0])
cl_check = re.compile('\w*\\multicolumn\s*\{\s*2\s*}')
assert len(cl_check.findall(parts[0]))>0
def _actual_col_span_latex(t):
"For testing at low level modifications and cell method"
t1_latex = t._repr_latex_()
row_split = re.compile(r'\\\\')
lines = row_split.split(t1_latex)
assert len(lines)==4
col_split = re.compile('&')
parts = col_split.split(lines[0])
cl_check = re.compile('\w*\\multicolumn\s*\{\s*2\s*}')
assert len(cl_check.findall(parts[0]))>0
#print("pass")
def cell_method_col_span_table():
"Returns a table modified to test col_span using the cell method"
t = default_table()
t.cell(0, 0).col_span = 2
return t
def test_cell_method_col_span_html():
"This tests that col_span works in html"
t = cell_method_col_span_table()
t1_html = t._repr_html_()
row_split = re.compile('<\s*tr\s*>')
lines = row_split.split(t1_html)
assert len(lines)==4
col_split = re.compile('>[\s\d\s]*<')
parts = col_split.split(lines[1])
cl_check = re.compile('colspan\s*=\s*"\s*2\s*"')
assert len(cl_check.findall(parts[0]))>0
def test_cell_method_col_span_latex():
"This tests that col_span works in latex"
t = cell_method_col_span_table()
t1_latex = t._repr_latex_()
row_split = re.compile(r'\\\\')
lines = row_split.split(t1_latex)
assert len(lines)==4
col_split = re.compile('&')
parts = col_split.split(lines[0])
cl_check = re.compile('\w*\\multicolumn\s*\{\s*2\s*}')
assert len(cl_check.findall(parts[0]))>0
|